From 1abe9e23c8267952025395124ebe4b386c55d858 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 28 Jan 2016 15:14:44 -0700 Subject: [PATCH 001/183] Remove (unused) ipc-contains-obj-shared-with hack from sharing. --- .../terrain/services/filesystem/sharing.clj | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/services/terrain/src/terrain/services/filesystem/sharing.clj b/services/terrain/src/terrain/services/filesystem/sharing.clj index ffc1952dc..5ca5d48c3 100644 --- a/services/terrain/src/terrain/services/filesystem/sharing.clj +++ b/services/terrain/src/terrain/services/filesystem/sharing.clj @@ -15,20 +15,6 @@ [terrain.services.filesystem.icat :as icat] [terrain.services.filesystem.validators :as validators])) -(def shared-with-attr "ipc-contains-obj-shared-with") - -(defn- add-user-shared-with - "Adds 'ipc-contains-obj-shared-with' AVU for a user to an object if it's not there." - [cm fpath shared-with] - (when (empty? (get-avus-by-collection cm fpath shared-with shared-with-attr)) - (set-metadata cm fpath shared-with shared-with shared-with-attr))) - -(defn- remove-user-shared-with - "Removes 'ipc-contains-obj-shared-with' AVU for a user from an object if it's there." - [cm fpath shared-with] - (when-not (empty? (get-avus-by-collection cm fpath shared-with shared-with-attr)) - (delete-metadata cm fpath shared-with))) - (defn- shared? ([cm share-with fpath] (:read (permissions cm share-with fpath))) @@ -101,7 +87,6 @@ share-recs (group-by keyfn (share-paths cm user share-withs fpaths perm)) sharees (map :user (:succeeded share-recs)) home-dir (paths/user-home-dir user)] - (dorun (map (partial add-user-shared-with cm (paths/user-home-dir user)) sharees)) {:user sharees :path fpaths :skipped (map #(dissoc % :skipped) (:skipped share-recs)) @@ -155,12 +140,6 @@ (shared? cm unshare-with fpath) (unshare-path cm user unshare-with fpath) :else (skip-share unshare-with fpath :not-shared)))) -(defn- clean-up-unsharee-avus - [cm fpath unshare-with] - (when-not (shared? cm unshare-with fpath) - (log/warn "Removing shared with AVU on" fpath "for" unshare-with) - (remove-user-shared-with cm fpath unshare-with))) - (defn unshare "Allows 'user' to unshare file 'fpath' with user 'unshare-with'." [user unshare-withs fpaths] @@ -181,7 +160,6 @@ unshare-recs (group-by keyfn (unshare-paths cm user unshare-withs fpaths)) unsharees (map :user (:succeeded unshare-recs)) home-dir (paths/user-home-dir user)] - (dorun (map (partial clean-up-unsharee-avus cm home-dir) unsharees)) {:user unsharees :path fpaths :skipped (map #(dissoc % :skipped) (:skipped unshare-recs))}))) From 6e417ef88558e44e10f3ea00ab51315dd798b960 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 4 Feb 2016 12:56:23 -0700 Subject: [PATCH 002/183] Bump version to 2.5. 
--- ansible/inventories/group_vars/all | 2 +- lein-plugins/lein-iplant-cmdtar/project.clj | 2 +- libs/authy/project.clj | 2 +- libs/clj-cas/project.clj | 2 +- libs/clj-icat-direct/project.clj | 2 +- libs/clj-jargon/project.clj | 4 ++-- libs/common-cfg/project.clj | 2 +- libs/common-cli/project.clj | 2 +- libs/common-swagger-api/project.clj | 2 +- libs/heuristomancer/project.clj | 4 ++-- libs/iplant-clojure-commons/project.clj | 4 ++-- libs/kameleon/project.clj | 2 +- libs/mescal/project.clj | 8 ++++---- libs/service-logging/project.clj | 2 +- services/Infosquito/project.clj | 8 ++++---- services/Infosquito/version | 2 +- services/JEX/project.clj | 10 +++++----- services/JEX/version | 2 +- services/NotificationAgent/project.clj | 10 +++++----- services/NotificationAgent/version | 2 +- services/anon-files/project.clj | 10 +++++----- services/anon-files/version | 2 +- services/apps/project.clj | 18 +++++++++--------- services/apps/version | 2 +- services/clockwork/project.clj | 12 ++++++------ services/clockwork/version | 2 +- services/condor-log-monitor/version | 2 +- services/data-info/project.clj | 20 ++++++++++---------- services/data-info/version | 2 +- services/dewey/project.clj | 10 +++++----- services/dewey/version | 2 +- services/info-typer/project.clj | 12 ++++++------ services/info-typer/version | 2 +- services/iplant-email/project.clj | 8 ++++---- services/iplant-email/version | 2 +- services/iplant-groups/project.clj | 12 ++++++------ services/iplant-groups/version | 2 +- services/jex-events/version | 2 +- services/kifshare/project.clj | 10 +++++----- services/kifshare/version | 2 +- services/metadata/project.clj | 14 +++++++------- services/metadata/version | 2 +- services/monkey/project.clj | 8 ++++---- services/monkey/version | 2 +- services/saved-searches/project.clj | 10 +++++----- services/saved-searches/version | 2 +- services/terrain/project.clj | 20 ++++++++++---------- services/terrain/version | 2 +- services/tree-urls/project.clj | 10 +++++----- services/tree-urls/version | 2 +- services/user-preferences/project.clj | 10 +++++----- services/user-preferences/version | 2 +- services/user-sessions/project.clj | 10 +++++----- services/user-sessions/version | 2 +- tools/facepalm/project.clj | 8 ++++---- tools/facepalm/version | 2 +- tools/filetool/project.clj | 8 ++++---- tools/filetool/version | 2 +- tools/job-preserver/project.clj | 4 ++-- tools/sharkbait/project.clj | 6 +++--- tools/sharkbait/version | 2 +- tools/template-mover/project.clj | 4 ++-- tools/template-mover/version | 2 +- ui/gradle.properties | 2 +- 64 files changed, 172 insertions(+), 172 deletions(-) diff --git a/ansible/inventories/group_vars/all b/ansible/inventories/group_vars/all index e955277b2..3d77d5963 100644 --- a/ansible/inventories/group_vars/all +++ b/ansible/inventories/group_vars/all @@ -136,7 +136,7 @@ de: # ssl_certificate_key: "/etc/ssl/example.com.key" app_version_name: Phthalo -app_version: 2.4.0 +app_version: 2.5.0 de_feedback_to_addr: "" de_mail_from_addr: "{{ de_feedback_to_addr }}" diff --git a/lein-plugins/lein-iplant-cmdtar/project.clj b/lein-plugins/lein-iplant-cmdtar/project.clj index 2dcca17f8..96c89d1d8 100644 --- a/lein-plugins/lein-iplant-cmdtar/project.clj +++ b/lein-plugins/lein-iplant-cmdtar/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/lein-iplant-cmdtar "5.2.4.0" +(defproject org.iplantc/lein-iplant-cmdtar "5.2.5.0" :eval-in-leiningen true :description "Leiningen plugin for generating tarball command distributions." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" diff --git a/libs/authy/project.clj b/libs/authy/project.clj index 87b2d7196..063f9e557 100644 --- a/libs/authy/project.clj +++ b/libs/authy/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/authy "5.2.4.0" +(defproject org.iplantc/authy "5.2.5.0" :description "An OAuth 2.0 client clibrary written in Clojure." :url "http://www.iplantcollaborative.org" :license {:name "BSD Standard License" diff --git a/libs/clj-cas/project.clj b/libs/clj-cas/project.clj index 1c4cc22fb..5d99faccd 100644 --- a/libs/clj-cas/project.clj +++ b/libs/clj-cas/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/clj-cas "5.2.4.0" +(defproject org.iplantc/clj-cas "5.2.5.0" :description "A CAS Client library written in Clojure." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" diff --git a/libs/clj-icat-direct/project.clj b/libs/clj-icat-direct/project.clj index 375954f07..6c106911b 100644 --- a/libs/clj-icat-direct/project.clj +++ b/libs/clj-icat-direct/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/clj-icat-direct "5.2.4.0" +(defproject org.iplantc/clj-icat-direct "5.2.5.0" :description "A Clojure library for accessing the iRODS ICAT database directly." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD Standard License" diff --git a/libs/clj-jargon/project.clj b/libs/clj-jargon/project.clj index 1ae4ec637..601a4f497 100644 --- a/libs/clj-jargon/project.clj +++ b/libs/clj-jargon/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/clj-jargon "5.2.4.0" +(defproject org.iplantc/clj-jargon "5.2.5.0" :description "Clojure API on top of iRODS's jargon-core." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -17,7 +17,7 @@ :exclusions [[org.slf4j/slf4j-api] [org.slf4j/slf4j-log4j12]]] [slingshot "0.12.2"] - [org.iplantc/clojure-commons "5.2.4.0"]] + [org.iplantc/clojure-commons "5.2.5.0"]] :repositories [["dice.repository" {:url "https://raw.github.com/DICE-UNC/DICE-Maven/master/releases"}] ["renci-snapshot.repository" diff --git a/libs/common-cfg/project.clj b/libs/common-cfg/project.clj index 3e075f5f4..28d8c2f7f 100644 --- a/libs/common-cfg/project.clj +++ b/libs/common-cfg/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/common-cfg "5.2.4.0" +(defproject org.iplantc/common-cfg "5.2.5.0" :description "DE services code for managing configurations." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD"} diff --git a/libs/common-cli/project.clj b/libs/common-cli/project.clj index ec2b2f58e..d26fcb512 100644 --- a/libs/common-cli/project.clj +++ b/libs/common-cli/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/common-cli "5.2.4.0" +(defproject org.iplantc/common-cli "5.2.5.0" :description "Common CLI functions for the DE backend services and tools" :url "http://github.com/iPlantCollaborativeOpenSource/DiscoveryEnvironmentBackend/" :license {:name "BSD"} diff --git a/libs/common-swagger-api/project.clj b/libs/common-swagger-api/project.clj index 8c5a9b6b2..3570641bb 100644 --- a/libs/common-swagger-api/project.clj +++ b/libs/common-swagger-api/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/common-swagger-api "5.2.4.0" +(defproject org.iplantc/common-swagger-api "5.2.5.0" :description "Common library for Swagger documented RESTful APIs" :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" diff --git a/libs/heuristomancer/project.clj b/libs/heuristomancer/project.clj index f2b0601e2..f01ad0394 100644 --- a/libs/heuristomancer/project.clj +++ b/libs/heuristomancer/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/heuristomancer "5.2.4.0" +(defproject org.iplantc/heuristomancer "5.2.5.0" :description "Clojure library for attempting to guess file types." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD Standard License" @@ -8,7 +8,7 @@ [org.clojure/tools.cli "0.3.2"] [org.clojure/tools.logging "0.3.1"] [instaparse "1.4.1"]] - :plugins [[org.iplantc/lein-iplant-cmdtar "5.2.4.0"] + :plugins [[org.iplantc/lein-iplant-cmdtar "5.2.5.0"] [test2junit "1.1.3"]] :aot [heuristomancer.core] :main heuristomancer.core) diff --git a/libs/iplant-clojure-commons/project.clj b/libs/iplant-clojure-commons/project.clj index 65146fa02..05054d71f 100755 --- a/libs/iplant-clojure-commons/project.clj +++ b/libs/iplant-clojure-commons/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/clojure-commons "5.2.4.0" +(defproject org.iplantc/clojure-commons "5.2.5.0" :description "Common Utilities for Clojure Projects" :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -20,5 +20,5 @@ [ring "1.4.0"] [slingshot "0.12.2"] [trptcolin/versioneer "0.2.0"] - [org.iplantc/service-logging "5.2.4.0"]] + [org.iplantc/service-logging "5.2.5.0"]] :profiles {:test {:resource-paths ["resources" "test-resources"]}}) diff --git a/libs/kameleon/project.clj b/libs/kameleon/project.clj index 9a063ece0..bc633cc48 100644 --- a/libs/kameleon/project.clj +++ b/libs/kameleon/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/kameleon "5.2.4.0" +(defproject org.iplantc/kameleon "5.2.5.0" :description "Library for interacting with backend relational databases." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" diff --git a/libs/mescal/project.clj b/libs/mescal/project.clj index 1e991bc5b..fc422af5f 100644 --- a/libs/mescal/project.clj +++ b/libs/mescal/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/mescal "5.2.4.0" +(defproject org.iplantc/mescal "5.2.5.0" :description "A Clojure client library for the Agave API." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD Standard License" @@ -9,7 +9,7 @@ [clj-time "0.7.0"] [com.cemerick/url "0.1.1"] [medley "0.5.3"] - [org.iplantc/authy "5.2.4.0"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/authy "5.2.5.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [slingshot "0.10.3"]]) diff --git a/libs/service-logging/project.clj b/libs/service-logging/project.clj index e822851fe..c48b63aeb 100644 --- a/libs/service-logging/project.clj +++ b/libs/service-logging/project.clj @@ -1,4 +1,4 @@ -(defproject org.iplantc/service-logging "5.2.4.0" +(defproject org.iplantc/service-logging "5.2.5.0" :description "Common Logging Utilities for Clojure Projects" :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" diff --git a/services/Infosquito/project.clj b/services/Infosquito/project.clj index ea43a0f1f..2c8acfeff 100644 --- a/services/Infosquito/project.clj +++ b/services/Infosquito/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/infosquito "5.2.4.0" +(defproject org.iplantc/infosquito "5.2.5.0" :description "An ICAT database crawler used to index the contents of iRODS." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -29,7 +29,7 @@ [com.novemberain/langohr "2.11.0"] [slingshot "0.10.3"] [me.raynes/fs "1.4.6"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"]] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"]] :profiles {:dev {:resource-paths ["dev-resources"]}}) diff --git a/services/Infosquito/version b/services/Infosquito/version index 675c9b112..da030363c 100644 --- a/services/Infosquito/version +++ b/services/Infosquito/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/JEX/project.clj b/services/JEX/project.clj index 42d226a67..da048a259 100644 --- a/services/JEX/project.clj +++ b/services/JEX/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/jex "5.2.4.0" +(defproject org.iplantc/jex "5.2.5.0" :description "A backend job execution service that submits jobs to Condor." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -24,11 +24,11 @@ [com.fasterxml.jackson.core/jackson-core]]] [com.cemerick/url "0.1.1"] [compojure "1.3.2"] - [org.iplantc/clojure-commons "5.2.4.0"] + [org.iplantc/clojure-commons "5.2.5.0"] [slingshot "0.12.2"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [me.raynes/fs "1.4.6"]] :plugins [[lein-midje "3.1.1"]] :profiles {:dev {:dependencies [[midje "1.6.3"]]}} diff --git a/services/JEX/version b/services/JEX/version index 675c9b112..da030363c 100644 --- a/services/JEX/version +++ b/services/JEX/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/NotificationAgent/project.clj b/services/NotificationAgent/project.clj index ff12b953c..2e0032344 100644 --- a/services/NotificationAgent/project.clj +++ b/services/NotificationAgent/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/notificationagent "5.2.4.0" +(defproject org.iplantc/notificationagent "5.2.5.0" :description "A web service for storing and forwarding notifications." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -22,10 +22,10 @@ [com.fasterxml.jackson.core/jackson-databind] [com.fasterxml.jackson.core/jackson-core]]] [compojure "1.4.0"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [me.raynes/fs "1.4.6"] [clj-http "2.0.0"] [clj-time "0.11.0"] diff --git a/services/NotificationAgent/version b/services/NotificationAgent/version index 675c9b112..da030363c 100644 --- a/services/NotificationAgent/version +++ b/services/NotificationAgent/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/anon-files/project.clj b/services/anon-files/project.clj index 3a6940e5b..8bd912e01 100644 --- a/services/anon-files/project.clj +++ b/services/anon-files/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/anon-files "5.2.4.0" +(defproject org.iplantc/anon-files "5.2.5.0" :description "Serves up files and directories that are shared with the anonymous user in iRODS." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD"} @@ -16,12 +16,12 @@ :main ^:skip-aot anon-files.core :profiles {:uberjar {:aot :all}} :dependencies [[org.clojure/clojure "1.6.0"] - [org.iplantc/clj-jargon "5.2.4.0" + [org.iplantc/clj-jargon "5.2.5.0" :exclusions [[org.slf4j/slf4j-log4j12] [log4j]]] - [org.iplantc/service-logging "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] + [org.iplantc/service-logging "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] [medley "0.6.0"] [compojure "1.3.4"] [ring "1.4.0"]] diff --git a/services/anon-files/version b/services/anon-files/version index 675c9b112..da030363c 100644 --- a/services/anon-files/version +++ b/services/anon-files/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/apps/project.clj b/services/apps/project.clj index 6695a120d..e75a3cecd 100644 --- a/services/apps/project.clj +++ b/services/apps/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/apps "5.2.4.0" +(defproject org.iplantc/apps "5.2.5.0" :description "Framework for hosting DiscoveryEnvironment metadata services." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -20,14 +20,14 @@ [com.google.guava/guava "18.0"] [medley "0.7.0"] [metosin/compojure-api "0.24.2"] - [org.iplantc/authy "5.2.4.0"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/mescal "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/common-swagger-api "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/authy "5.2.5.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/mescal "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/common-swagger-api "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [me.raynes/fs "1.4.6"] [mvxcvi/clj-pgp "0.8.0"]] :plugins [[lein-ring "0.9.6"] diff --git a/services/apps/version b/services/apps/version index 675c9b112..da030363c 100644 --- a/services/apps/version +++ b/services/apps/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/clockwork/project.clj b/services/clockwork/project.clj index 30f5984ea..dffb8364a 100644 --- a/services/clockwork/project.clj +++ b/services/clockwork/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/clockwork "5.2.4.0" +(defproject org.iplantc/clockwork "5.2.5.0" :description "Scheduled jobs for the iPlant Discovery Environment" :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -30,13 +30,13 @@ [com.mchange/c3p0 "0.9.5.1"] [korma "0.3.0-RC5" :exclusions [c3p0]] - [org.iplantc/clj-jargon "5.2.4.0" + [org.iplantc/clj-jargon "5.2.5.0" :exclusions [[org.slf4j/slf4j-log4j12] [log4j]]] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [me.raynes/fs "1.4.6"] [slingshot "0.10.3"]] :profiles {:dev {:resource-paths ["resources/test"]} diff --git a/services/clockwork/version b/services/clockwork/version index 675c9b112..da030363c 100644 --- a/services/clockwork/version +++ b/services/clockwork/version @@ -1 +1 
@@ -5.2.4.0 +5.2.5.0 diff --git a/services/condor-log-monitor/version b/services/condor-log-monitor/version index 675c9b112..da030363c 100644 --- a/services/condor-log-monitor/version +++ b/services/condor-log-monitor/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/data-info/project.clj b/services/data-info/project.clj index 62ce6e8dd..2d44de604 100644 --- a/services/data-info/project.clj +++ b/services/data-info/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/data-info "5.2.4.0" +(defproject org.iplantc/data-info "5.2.5.0" :description "provides the data information HTTP API" :manifest {"Git-Ref" ~(git-ref)} :uberjar-name "data-info-standalone.jar" @@ -28,19 +28,19 @@ [org.apache.tika/tika-core "1.11"] [net.sf.opencsv/opencsv "2.3"] [slingshot "0.12.2"] - [org.iplantc/clj-icat-direct "5.2.4.0" + [org.iplantc/clj-icat-direct "5.2.5.0" :exclusions [[org.slf4j/slf4j-log4j12] [log4j]]] - [org.iplantc/clj-jargon "5.2.4.0" + [org.iplantc/clj-jargon "5.2.5.0" :exclusions [[org.slf4j/slf4j-log4j12] [log4j]]] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/common-swagger-api "5.2.4.0"] - [org.iplantc/heuristomancer "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"]] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/common-swagger-api "5.2.5.0"] + [org.iplantc/heuristomancer "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"]] :plugins [[lein-ring "0.9.6"] [swank-clojure "1.4.2"]] :profiles {:dev {:resource-paths ["conf/test"]} diff --git a/services/data-info/version b/services/data-info/version index 675c9b112..da030363c 100644 --- a/services/data-info/version +++ b/services/data-info/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/dewey/project.clj b/services/dewey/project.clj index ee0a6b160..ed2d1d6cc 100644 --- a/services/dewey/project.clj +++ b/services/dewey/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/dewey "5.2.4.0" +(defproject org.iplantc/dewey "5.2.5.0" :description "This is a RabbitMQ client responsible for keeping an elasticsearch index synchronized with an iRODS repository using messages produced by iRODS." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" @@ -30,12 +30,12 @@ [compojure "1.1.8"] [ring "1.4.0"] [slingshot "0.10.3"] - [org.iplantc/clj-jargon "5.2.4.0" + [org.iplantc/clj-jargon "5.2.5.0" :exclusions [[org.slf4j/slf4j-log4j12] [log4j]]] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [me.raynes/fs "1.4.6"]] :resource-paths [] :profiles {:dev {:dependencies [[midje "1.6.3"]] diff --git a/services/dewey/version b/services/dewey/version index 675c9b112..da030363c 100644 --- a/services/dewey/version +++ b/services/dewey/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/info-typer/project.clj b/services/info-typer/project.clj index b13f8b198..afe3f1f96 100644 --- a/services/info-typer/project.clj +++ b/services/info-typer/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/info-typer "5.2.4.0" +(defproject org.iplantc/info-typer "5.2.5.0" :description "An AMQP based info type detection service for iRODS" :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -17,13 +17,13 @@ :dependencies [[org.clojure/clojure "1.7.0"] [com.novemberain/langohr "3.1.0"] [me.raynes/fs "1.4.6"] - [org.iplantc/clj-jargon "5.2.4.0" + [org.iplantc/clj-jargon "5.2.5.0" :exclusions [[org.slf4j/slf4j-log4j12] [log4j]]] - [org.iplantc/clojure-commons "5.2.4.0" :exclusions [commons-logging]] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/heuristomancer "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"]] + [org.iplantc/clojure-commons "5.2.5.0" :exclusions [commons-logging]] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/heuristomancer "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"]] :main ^:skip-aot info-typer.core :profiles {:dev {:resource-paths ["conf/test"]} :uberjar {:aot :all}} diff --git a/services/info-typer/version b/services/info-typer/version index 675c9b112..da030363c 100644 --- a/services/info-typer/version +++ b/services/info-typer/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/iplant-email/project.clj b/services/iplant-email/project.clj index bd125dfbc..6d9c52faf 100644 --- a/services/iplant-email/project.clj +++ b/services/iplant-email/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/iplant-email "5.2.4.0" +(defproject org.iplantc/iplant-email "5.2.5.0" :description "iPlant Email Service" :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -15,8 +15,8 @@ :manifest {"Git-Ref" ~(git-ref)} :uberjar-name "iplant-email-standalone.jar" :dependencies [[org.clojure/clojure "1.5.1"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [cheshire "5.5.0" :exclusions [[com.fasterxml.jackson.dataformat/jackson-dataformat-cbor] [com.fasterxml.jackson.dataformat/jackson-dataformat-smile] @@ -26,7 +26,7 @@ [javax.mail/mail "1.4"] [org.bituf/clj-stringtemplate "0.2"] [compojure "1.0.1"] - [org.iplantc/common-cli "5.2.4.0"] + [org.iplantc/common-cli "5.2.5.0"] [me.raynes/fs "1.4.6"]] :aot [iplant-email.core] :main iplant-email.core) diff --git a/services/iplant-email/version b/services/iplant-email/version index 675c9b112..da030363c 100644 --- a/services/iplant-email/version +++ 
b/services/iplant-email/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/iplant-groups/project.clj b/services/iplant-groups/project.clj index 166025aad..be5e29096 100644 --- a/services/iplant-groups/project.clj +++ b/services/iplant-groups/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/iplant-groups "5.2.4.0" +(defproject org.iplantc/iplant-groups "5.2.5.0" :description "A REST front-end for Grouper." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -22,11 +22,11 @@ [medley "0.7.0"] [metosin/compojure-api "0.24.2"] [me.raynes/fs "1.4.6"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-swagger-api "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-swagger-api "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [ring/ring-core "1.4.0"] [ring/ring-jetty-adapter "1.4.0"]] :plugins [[lein-ring "0.9.6"]] diff --git a/services/iplant-groups/version b/services/iplant-groups/version index 675c9b112..da030363c 100644 --- a/services/iplant-groups/version +++ b/services/iplant-groups/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/jex-events/version b/services/jex-events/version index 675c9b112..da030363c 100644 --- a/services/jex-events/version +++ b/services/jex-events/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/kifshare/project.clj b/services/kifshare/project.clj index 88684e5c2..11b7f7de8 100644 --- a/services/kifshare/project.clj +++ b/services/kifshare/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/kifshare "5.2.4.0" +(defproject org.iplantc/kifshare "5.2.5.0" :description "CyVerse Quickshare for iRODS" :url "https://github.com/cyverse/DE" @@ -20,12 +20,12 @@ :dependencies [[org.clojure/clojure "1.6.0"] [org.clojure/tools.logging "0.3.1"] [medley "0.5.5"] - [org.iplantc/clj-jargon "5.2.4.0" + [org.iplantc/clj-jargon "5.2.5.0" :exclusions [[org.slf4j/slf4j-log4j12] [log4j]]] - [org.iplantc/service-logging "5.2.4.0"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] + [org.iplantc/service-logging "5.2.5.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] [me.raynes/fs "1.4.6"] [cheshire "5.5.0" :exclusions [[com.fasterxml.jackson.dataformat/jackson-dataformat-cbor] diff --git a/services/kifshare/version b/services/kifshare/version index 675c9b112..da030363c 100644 --- a/services/kifshare/version +++ b/services/kifshare/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/metadata/project.clj b/services/metadata/project.clj index fa5180d7d..e35d375b9 100644 --- a/services/metadata/project.clj +++ b/services/metadata/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/metadata "5.2.4.0" +(defproject org.iplantc/metadata "5.2.5.0" :description "The REST API for the Discovery Environment Metadata services." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD Standard License" @@ -15,12 +15,12 @@ :manifest {"Git-Ref" ~(git-ref)} :dependencies [[org.clojure/clojure "1.7.0"] [metosin/compojure-api "0.24.2"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-swagger-api "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-swagger-api "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [slingshot "0.12.2"]] :main metadata.core :ring {:handler metadata.core/dev-handler diff --git a/services/metadata/version b/services/metadata/version index 675c9b112..da030363c 100644 --- a/services/metadata/version +++ b/services/metadata/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/monkey/project.clj b/services/monkey/project.clj index f430fd9a2..9303a25be 100644 --- a/services/monkey/project.clj +++ b/services/monkey/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/monkey "5.2.4.0" +(defproject org.iplantc/monkey "5.2.5.0" :description "A metadata database crawler. It synchronizes the tag documents in the search data index with the tag information inthe metadata database. 🐒" :url "https://github.com/iPlantCollaborativeOpenSource/DE" @@ -24,7 +24,7 @@ [com.novemberain/langohr "2.11.0"] [me.raynes/fs "1.4.6"] [slingshot "0.10.3"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"]] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"]] :profiles {:dev {:resource-paths ["conf/test"]}}) diff --git a/services/monkey/version b/services/monkey/version index 675c9b112..da030363c 100644 --- a/services/monkey/version +++ b/services/monkey/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/saved-searches/project.clj b/services/saved-searches/project.clj index 2c94659a4..08db77e77 100644 --- a/services/saved-searches/project.clj +++ b/services/saved-searches/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/saved-searches "5.2.4.0" +(defproject org.iplantc/saved-searches "5.2.5.0" :description "DE API for managing saved searches." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD"} @@ -16,10 +16,10 @@ :main saved-searches.core :uberjar-name "saved-searches-standalone.jar" :dependencies [[org.clojure/clojure "1.5.1"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [io.aviso/pretty "0.1.17"] [me.raynes/fs "1.4.6"] [cheshire "5.5.0" diff --git a/services/saved-searches/version b/services/saved-searches/version index 675c9b112..da030363c 100644 --- a/services/saved-searches/version +++ b/services/saved-searches/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/terrain/project.clj b/services/terrain/project.clj index fd06f39a9..5d20fb22f 100644 --- a/services/terrain/project.clj +++ b/services/terrain/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/terrain "5.2.4.0-SNAPSHOT" +(defproject org.iplantc/terrain "5.2.5.0-SNAPSHOT" :description "Framework for hosting DiscoveryEnvironment metadata services." :url "https://github.com/cyverse/DE" :license {:name "BSD Standard License" @@ -33,15 +33,15 @@ [org.nexml.model/nexml "1.5-SNAPSHOT"] ; provides org.nexml.model [org/forester "1.005" ] [slingshot "0.12.2"] - [org.iplantc/clj-cas "5.2.4.0"] - [org.iplantc/clj-icat-direct "5.2.4.0"] - [org.iplantc/clj-jargon "5.2.4.0"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/heuristomancer "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"]] + [org.iplantc/clj-cas "5.2.5.0"] + [org.iplantc/clj-icat-direct "5.2.5.0"] + [org.iplantc/clj-jargon "5.2.5.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/heuristomancer "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"]] :plugins [[lein-ring "0.9.2" :exclusions [org.clojure/clojure]] [swank-clojure "1.4.2" :exclusions [org.clojure/clojure]]] :profiles {:dev {:resource-paths ["conf/test"]} diff --git a/services/terrain/version b/services/terrain/version index 81895948f..f39bd79e2 100644 --- a/services/terrain/version +++ b/services/terrain/version @@ -1 +1 @@ -5.2.4.0-SNAPSHOT +5.2.5.0-SNAPSHOT diff --git a/services/tree-urls/project.clj b/services/tree-urls/project.clj index 528a44a9a..70ce5e176 100644 --- a/services/tree-urls/project.clj +++ b/services/tree-urls/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/tree-urls "5.2.4.0" +(defproject org.iplantc/tree-urls "5.2.5.0" :description "DE API for managing tree urls." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD"} @@ -16,10 +16,10 @@ :main tree-urls.core :uberjar-name "tree-urls-standalone.jar" :dependencies [[org.clojure/clojure "1.5.1"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [io.aviso/pretty "0.1.17"] [me.raynes/fs "1.4.6"] [cheshire "5.5.0" diff --git a/services/tree-urls/version b/services/tree-urls/version index 675c9b112..da030363c 100644 --- a/services/tree-urls/version +++ b/services/tree-urls/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/user-preferences/project.clj b/services/user-preferences/project.clj index 192950e9c..7c1eda7e7 100644 --- a/services/user-preferences/project.clj +++ b/services/user-preferences/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/user-preferences "5.2.4.0" +(defproject org.iplantc/user-preferences "5.2.5.0" :description "DE API for managing user preferences." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD"} @@ -16,10 +16,10 @@ :main user-preferences.core :uberjar-name "user-preferences-standalone.jar" :dependencies [[org.clojure/clojure "1.5.1"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] [io.aviso/pretty "0.1.17"] [me.raynes/fs "1.4.6"] [cheshire "5.5.0" diff --git a/services/user-preferences/version b/services/user-preferences/version index 675c9b112..da030363c 100644 --- a/services/user-preferences/version +++ b/services/user-preferences/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/services/user-sessions/project.clj b/services/user-sessions/project.clj index ae906520e..53be52b67 100644 --- a/services/user-sessions/project.clj +++ b/services/user-sessions/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/user-sessions "5.2.4.0" +(defproject org.iplantc/user-sessions "5.2.5.0" :description "DE API for managing user sessions." 
:url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD"} @@ -16,10 +16,10 @@ :aot [user-sessions.core] :main user-sessions.core :dependencies [[org.clojure/clojure "1.5.1"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/common-cfg "5.2.4.0"] - [org.iplantc/service-logging "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/common-cfg "5.2.5.0"] + [org.iplantc/service-logging "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] [io.aviso/pretty "0.1.17"] [me.raynes/fs "1.4.6"] [cheshire "5.5.0" diff --git a/services/user-sessions/version b/services/user-sessions/version index 675c9b112..da030363c 100644 --- a/services/user-sessions/version +++ b/services/user-sessions/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/tools/facepalm/project.clj b/tools/facepalm/project.clj index dba13951d..8b37a0726 100644 --- a/tools/facepalm/project.clj +++ b/tools/facepalm/project.clj @@ -11,7 +11,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/facepalm "5.2.4.0" +(defproject org.iplantc/facepalm "5.2.5.0" :description "Command-line utility for DE database managment." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -28,9 +28,9 @@ [korma "0.4.0" :exclusions [c3p0]] [me.raynes/fs "1.4.6"] - [org.iplantc/clj-jargon "5.2.4.0"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] + [org.iplantc/clj-jargon "5.2.5.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] [postgresql "9.1-901-1.jdbc4"] [slingshot "0.10.3"] [clj-http "2.0.0"]] diff --git a/tools/facepalm/version b/tools/facepalm/version index 675c9b112..da030363c 100644 --- a/tools/facepalm/version +++ b/tools/facepalm/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/tools/filetool/project.clj b/tools/filetool/project.clj index 8194f2e43..0b30e178f 100644 --- a/tools/filetool/project.clj +++ b/tools/filetool/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/porklock "5.2.4.0" +(defproject org.iplantc/porklock "5.2.5.0" :description "A command-line tool for interacting with iRODS." :url "https://github.com/iPlantCollaborativeOpenSource/DE" :license {:name "BSD" @@ -21,6 +21,6 @@ [org.clojure/tools.logging "0.3.1"] [commons-io/commons-io "2.4"] [slingshot "0.12.2"] - [org.iplantc/clj-jargon "5.2.4.0"] - [org.iplantc/clojure-commons "5.2.4.0"] - [org.iplantc/common-cli "5.2.4.0"]]) + [org.iplantc/clj-jargon "5.2.5.0"] + [org.iplantc/clojure-commons "5.2.5.0"] + [org.iplantc/common-cli "5.2.5.0"]]) diff --git a/tools/filetool/version b/tools/filetool/version index 675c9b112..da030363c 100644 --- a/tools/filetool/version +++ b/tools/filetool/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/tools/job-preserver/project.clj b/tools/job-preserver/project.clj index cd0841004..6f85991cd 100644 --- a/tools/job-preserver/project.clj +++ b/tools/job-preserver/project.clj @@ -1,4 +1,4 @@ -(defproject job-preserver "5.2.4.0" +(defproject job-preserver "5.2.5.0" :description "Migration to move job data that was skipped in previous migrations." 
:url "https://github.com/iPlantCollaborativeOpenSource/DiscoveryEnvironmentBackend" :license {:name "BSD Standard License" @@ -10,7 +10,7 @@ [org.clojure/clojure "1.6.0"] [org.clojure/tools.cli "0.3.1"] [org.clojure/tools.logging "0.3.1"] - [org.iplantc/kameleon "5.2.4.0"]] + [org.iplantc/kameleon "5.2.5.0"]] :uberjar-name "job-preserver.jar" :aot [job-preserver.core] :main job-preserver.core) diff --git a/tools/sharkbait/project.clj b/tools/sharkbait/project.clj index ed18275e8..78c6a391e 100644 --- a/tools/sharkbait/project.clj +++ b/tools/sharkbait/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject org.iplantc/sharkbait "5.2.4.0" +(defproject org.iplantc/sharkbait "5.2.5.0" :description "Utility for initializing Grouper." :url "https://github.com/iPlantCollaborativeOpenSource/DE/" :license {:name "BSD" @@ -22,8 +22,8 @@ [org.clojure/tools.logging "0.3.1"] [org.hibernate/hibernate-core "3.6.10.Final"] [org.hibernate/hibernate-ehcache "3.6.10.Final"] - [org.iplantc/common-cli "5.2.4.0"] - [org.iplantc/kameleon "5.2.4.0"] + [org.iplantc/common-cli "5.2.5.0"] + [org.iplantc/kameleon "5.2.5.0"] [postgresql "9.3-1102.jdbc41"]] :main sharkbait.core :profiles {:uberjar {:aot :all}}) diff --git a/tools/sharkbait/version b/tools/sharkbait/version index 675c9b112..da030363c 100644 --- a/tools/sharkbait/version +++ b/tools/sharkbait/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/tools/template-mover/project.clj b/tools/template-mover/project.clj index cc2caa955..3afa9ea99 100644 --- a/tools/template-mover/project.clj +++ b/tools/template-mover/project.clj @@ -7,7 +7,7 @@ (string/trim (:out (sh "git" "rev-parse" "HEAD"))) "")) -(defproject template-mover "5.2.4.0" +(defproject template-mover "5.2.5.0" :description "Utility to copy metadata templates to their new database." :url "https://github.com/iPlantCollaborativeOpenSource/DiscoveryEnvironmentBackend" :license {:name "BSD Standard License" @@ -19,7 +19,7 @@ [org.clojure/java.jdbc "0.3.7"] [org.clojure/tools.cli "0.3.1"] [org.clojure/tools.logging "0.3.1"] - [org.iplantc/kameleon "5.2.4.0"] + [org.iplantc/kameleon "5.2.5.0"] [postgresql "9.1-901-1.jdbc4"]] :aot :all :main template-mover.core) diff --git a/tools/template-mover/version b/tools/template-mover/version index 675c9b112..da030363c 100644 --- a/tools/template-mover/version +++ b/tools/template-mover/version @@ -1 +1 @@ -5.2.4.0 +5.2.5.0 diff --git a/ui/gradle.properties b/ui/gradle.properties index 1436070c2..4a109b493 100644 --- a/ui/gradle.properties +++ b/ui/gradle.properties @@ -1,4 +1,4 @@ -version=2.4.0 +version=2.5.0 BUILD_TAG= BUILD_ID= From cd76375bab7a64192033535ee9b207c6b5248211 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 4 Feb 2016 15:16:15 -0700 Subject: [PATCH 003/183] CORE-5291: After delete, return path in trash This modifies only the data-info endpoints, but since terrain serves as a pure passthrough for these endpoints, it will also be reflected in the terrain endpoints. 
--- .../src/data_info/routes/domain/trash.clj | 18 ++++++++++++++++++ .../data-info/src/data_info/routes/trash.clj | 6 +++--- .../data-info/src/data_info/services/trash.clj | 13 +++++++++---- 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/services/data-info/src/data_info/routes/domain/trash.clj b/services/data-info/src/data_info/routes/domain/trash.clj index 7a359427d..383ec417c 100644 --- a/services/data-info/src/data_info/routes/domain/trash.clj +++ b/services/data-info/src/data_info/routes/domain/trash.clj @@ -27,3 +27,21 @@ (s/defschema RestorationPaths {:restored (describe RestorationPathsMap "A map of paths from the request to their restoration info")}) + +(s/defschema TrashPathsMap + {(describe s/Keyword "The iRODS data item's original path.") + (describe String "The data item's path in the trash")}) + +(s/defschema TrashPaths + (assoc Paths + :trash-paths (describe TrashPathsMap "A map of paths from the request to their location in the trash, if any."))) + +;; Used only for documentation in Swagger UI +(s/defschema TrashPathsDocMap + {:/path/from/request/to/a/file/or/folder + (describe String "The data item's path in the trash")}) + +;; Used only for documentation in Swagger UI +(s/defschema TrashPathsDoc + (assoc TrashPaths + :trash-paths (describe TrashPathsDocMap "A map of paths from the request to their location in the trash, if any."))) diff --git a/services/data-info/src/data_info/routes/trash.clj b/services/data-info/src/data_info/routes/trash.clj index 841b51c2f..d695a1793 100644 --- a/services/data-info/src/data_info/routes/trash.clj +++ b/services/data-info/src/data_info/routes/trash.clj @@ -20,7 +20,7 @@ :tags ["bulk"] :query [params StandardUserQueryParams] :body [body (describe Paths "The paths to move to the trash")] - :return Paths + :return (s/doc-only TrashPaths TrashPathsDoc) :summary "Delete Data Items" :description (str "Delete the data items with the listed paths." @@ -46,7 +46,7 @@ (DELETE* "/" [:as {uri :uri}] :query [params StandardUserQueryParams] - :return Paths + :return TrashPaths :summary "Delete Data Item" :description (str "Deletes the data item with the provided UUID." @@ -56,7 +56,7 @@ (DELETE* "/children" [:as {uri :uri}] :query [params StandardUserQueryParams] - :return Paths + :return TrashPaths :summary "Delete Data Item Contents" :description (str "Deletes the contents of the folder with the provided UUID." diff --git a/services/data-info/src/data_info/services/trash.clj b/services/data-info/src/data_info/services/trash.clj index d388f0bf2..ed00931a4 100644 --- a/services/data-info/src/data_info/services/trash.clj +++ b/services/data-info/src/data_info/services/trash.clj @@ -35,7 +35,8 @@ [cm p user] (let [trash-path (randomized-trash-path user p)] (move cm p trash-path :user user :admin-users (cfg/irods-admins)) - (set-metadata cm trash-path "ipc-trash-origin" p paths/IPCSYSTEM))) + (set-metadata cm trash-path "ipc-trash-origin" p paths/IPCSYSTEM) + trash-path)) (defn- home-matcher [user path] @@ -51,7 +52,8 @@ (defn- delete-paths [user paths] (with-jargon (cfg/jargon-cfg) [cm] - (let [paths (mapv ft/rm-last-slash paths)] + (let [paths (mapv ft/rm-last-slash paths) + trash-paths (atom (hash-map))] (validators/user-exists cm user) (validators/all-paths-exist cm paths) (validators/user-owns-paths cm user paths) @@ -69,10 +71,13 @@ ;;; If the file isn't already in the user's trash, move it there ;;; otherwise, do a hard delete. 
(if-not (.startsWith p (paths/user-trash-path user)) - (move-to-trash cm p user) + (do (let [trash-path (move-to-trash cm p user)] + (reset! trash-paths + (assoc @trash-paths p trash-path)))) (delete cm p true))) ;;; Force a delete to bypass proxy user's trash. - {:paths paths}))) + {:paths paths + :trash-paths @trash-paths}))) (defn- delete-uuid "Delete by UUID: given a user and a data item UUID, delete that data item, returning a list of filenames deleted." From 389470ecce35aa496804416054235a16f689cbc0 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 4 Feb 2016 16:53:21 -0700 Subject: [PATCH 004/183] Remove unused net.sf.json-lib/json-lib dependency from data-info. --- services/data-info/project.clj | 1 - 1 file changed, 1 deletion(-) diff --git a/services/data-info/project.clj b/services/data-info/project.clj index 2d44de604..35a7b4ce1 100644 --- a/services/data-info/project.clj +++ b/services/data-info/project.clj @@ -24,7 +24,6 @@ [dire "0.5.3"] [me.raynes/fs "1.4.6"] [metosin/compojure-api "0.24.2"] - [net.sf.json-lib/json-lib "2.4" :classifier "jdk15"] [org.apache.tika/tika-core "1.11"] [net.sf.opencsv/opencsv "2.3"] [slingshot "0.12.2"] From e9ccd94f0e1ca601099bdc9bac3679f5d29df576 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 28 Jan 2016 16:27:36 -0700 Subject: [PATCH 005/183] CORE-6215: add anon-files sharing to data-info. --- services/data-info/src/data_info/routes.clj | 2 + .../src/data_info/routes/domain/sharing.clj | 25 ++++ .../src/data_info/routes/sharing.clj | 19 +++ .../src/data_info/services/sharing.clj | 121 ++++++++++++++++++ 4 files changed, 167 insertions(+) create mode 100644 services/data-info/src/data_info/routes/domain/sharing.clj create mode 100644 services/data-info/src/data_info/routes/sharing.clj create mode 100644 services/data-info/src/data_info/services/sharing.clj diff --git a/services/data-info/src/data_info/routes.clj b/services/data-info/src/data_info/routes.clj index 1d2bb2b21..d8037d238 100644 --- a/services/data-info/src/data_info/routes.clj +++ b/services/data-info/src/data_info/routes.clj @@ -14,6 +14,7 @@ [data-info.routes.users :as users-routes] [data-info.routes.navigation :as navigation-routes] [data-info.routes.rename :as rename-routes] + [data-info.routes.sharing :as sharing-routes] [data-info.routes.status :as status-routes] [data-info.routes.stats :as stat-routes] [data-info.routes.trash :as trash-routes] @@ -53,5 +54,6 @@ users-routes/permissions-gatherer navigation-routes/navigation stat-routes/stat-gatherer + sharing-routes/sharing-routes trash-routes/trash (route/not-found (svc/unrecognized-path-response)))) diff --git a/services/data-info/src/data_info/routes/domain/sharing.clj b/services/data-info/src/data_info/routes/domain/sharing.clj new file mode 100644 index 000000000..261503e4c --- /dev/null +++ b/services/data-info/src/data_info/routes/domain/sharing.clj @@ -0,0 +1,25 @@ +(ns data-info.routes.domain.sharing + (:use [common-swagger-api.schema :only [describe + NonBlankString]]) + (:require [schema.core :as s])) + +(s/defschema AnonFileUrls + {(describe s/Keyword "the iRODS data item's path") + (describe NonBlankString "the URL for the file to request in anon-files.")}) + +(s/defschema AnonShareInfo + {:user + (describe NonBlankString "The user performing the request.") + + :paths + (describe AnonFileUrls "The anon-files URLs for the paths provided with the request.")}) + +;; Used only for display as documentation in Swagger UI +(s/defschema AnonFilePathsMap + {:/path/from/request/to/a/file + (describe NonBlankString 
"the URL for the file to request in anon-files.")}) + +;; Used only for display as documentation in Swagger UI +(s/defschema AnonShareResponse + (assoc AnonShareInfo + :paths (describe AnonFilePathsMap "The anon-files URLs for the paths provided with the request."))) diff --git a/services/data-info/src/data_info/routes/sharing.clj b/services/data-info/src/data_info/routes/sharing.clj new file mode 100644 index 000000000..6cebd1335 --- /dev/null +++ b/services/data-info/src/data_info/routes/sharing.clj @@ -0,0 +1,19 @@ +(ns data-info.routes.sharing + (:use [common-swagger-api.schema] + [data-info.routes.domain.common] + [data-info.routes.domain.sharing]) + (:require [data-info.services.sharing :as sharing] + [data-info.util.service :as svc] + [data-info.util.schema :as s])) + +(defroutes* sharing-routes + (POST* "/anonymizer" [:as {uri :uri}] + :tags ["bulk"] + :query [params StandardUserQueryParams] + :body [body (describe Paths "The paths to make readable by the anonymous user.")] + :return (s/doc-only AnonShareInfo AnonShareResponse) + :summary "Make Data Items Anonymously Readable" + :description (str +"Given a list of files in the body, makes the files readable by the anonymous user." +(get-error-code-block "ERR_NOT_A_FILE, ERR_DOES_NOT_EXIST, ERR_NOT_OWNER, ERR_TOO_MANY_PATHS, ERR_NOT_A_USER")) + (svc/trap uri sharing/do-anon-files params body))) diff --git a/services/data-info/src/data_info/services/sharing.clj b/services/data-info/src/data_info/services/sharing.clj new file mode 100644 index 000000000..893e38723 --- /dev/null +++ b/services/data-info/src/data_info/services/sharing.clj @@ -0,0 +1,121 @@ +(ns data-info.services.sharing + (:use [clj-jargon.init :only [with-jargon]] + [clj-jargon.item-info :only [trash-base-dir is-dir?]] + [clj-jargon.permissions] + [slingshot.slingshot :only [try+ throw+]]) + (:require [clojure.tools.logging :as log] + [clojure.string :as string] + [clojure-commons.file-utils :as ft] + [cemerick.url :as url] + [dire.core :refer [with-pre-hook! with-post-hook!]] + [data-info.util.logging :as dul] + [data-info.util.paths :as paths] + [data-info.util.config :as cfg] + [data-info.util.validators :as validators])) + +(defn- shared? + ([cm share-with fpath] + (:read (permissions cm share-with fpath))) + ([cm share-with fpath desired-perm] + (let [curr-perm (permission-for cm share-with fpath)] + (= curr-perm desired-perm)))) + +(defn- skip-share + [user path reason] + (log/warn "Skipping share of" path "with" user "because:" reason) + {:user user + :path path + :reason reason + :skipped true}) + +(defn- share-path-home + "Returns the home directory that a shared file is under." + [share-path] + (string/join "/" (take 4 (string/split share-path #"\/")))) + +(defn- share-path + "Shares a path with a user. This consists of the following steps: + + 1. The parent directories up to the sharer's home directory need to be marked as readable + by the sharee. Othwerwise, any files that are shared will be orphaned in the UI. + + 2. If the shared item is a directory then the inherit bit needs to be set so that files + that are uploaded into the directory will also be shared. + + 3. The permissions are set on the item being shared. This is done recursively in case the + item being shared is a directory." 
+ [cm user share-with perm fpath] + (let [hdir (share-path-home fpath) + trash-dir (trash-base-dir (:zone cm) user) + base-dirs #{hdir trash-dir}] + (log/warn fpath "is being shared with" share-with "by" user) + (process-parent-dirs (partial set-readable cm share-with true) #(not (base-dirs %)) fpath) + + (when (is-dir? cm fpath) + (log/warn fpath "is a directory, setting the inherit bit.") + (set-inherits cm fpath)) + + (when-not (is-readable? cm share-with hdir) + (log/warn share-with "is being given read permissions on" hdir "by" user) + (set-permission cm share-with hdir :read false)) + + (log/warn share-with "is being given recursive permissions (" perm ") on" fpath) + (set-permission cm share-with fpath (keyword perm) true) + + {:user share-with :path fpath})) + +(defn- share-paths + [cm user share-withs fpaths perm] + (for [share-with share-withs + fpath fpaths] + (cond (= user share-with) (skip-share share-with fpath :share-with-self) + (paths/in-trash? user fpath) (skip-share share-with fpath :share-from-trash) + (shared? cm share-with fpath perm) (skip-share share-with fpath :already-shared) + :else (share-path cm user share-with perm fpath)))) + +(defn- share + [cm user share-withs fpaths perm] + (validators/user-exists cm user) + (validators/all-users-exist cm share-withs) + (validators/all-paths-exist cm fpaths) + (validators/user-owns-paths cm user fpaths) + + (let [keyfn #(if (:skipped %) :skipped :succeeded) + share-recs (group-by keyfn (share-paths cm user share-withs fpaths perm)) + sharees (map :user (:succeeded share-recs)) + home-dir (paths/user-home-dir user)] + {:user sharees + :path fpaths + :skipped (map #(dissoc % :skipped) (:skipped share-recs)) + :permission perm})) + +(defn- anon-file-url + [p] + (let [aurl (url/url (cfg/anon-files-base))] + (str (-> aurl (assoc :path (ft/path-join (:path aurl) (string/replace p #"^\/" ""))))))) + +(defn- anon-files-urls + [paths] + (into {} (map #(vector %1 (anon-file-url %1)) paths))) + +(defn- anon-files + [user paths] + (with-jargon (cfg/jargon-cfg) [cm] + (validators/user-exists cm user) + (validators/all-paths-exist cm paths) + (validators/paths-are-files cm paths) + (validators/user-owns-paths cm user paths) + (log/warn "Giving read access to" (cfg/anon-user) "on:" (string/join " " paths)) + (share cm user [(cfg/anon-user)] paths :read) + {:user user :paths (anon-files-urls paths)})) + +(defn do-anon-files + [{:keys [user]} {:keys [paths]}] + (anon-files user (mapv ft/rm-last-slash paths))) + +(with-pre-hook! #'do-anon-files + (fn [params body] + (dul/log-call "do-anon-files" params body) + (validators/validate-num-paths (:paths body)))) + +(with-post-hook! #'do-anon-files (dul/log-func "do-anon-files")) From acfef5dce706e24f5adff585016af967216a187a Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 4 Feb 2016 12:46:40 -0700 Subject: [PATCH 006/183] CORE-6215: migrate anon-files to call data-info endpoint. 
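Terrain's POST /secured/filesystem/anon-files handler now hands the request
off to data-info's new POST /anonymizer endpoint instead of setting the iRODS
permissions itself. Per the AnonShareInfo schema added in the previous commit,
a successful response looks roughly like this (the user, path, and anon-files
host are illustrative):

    {:user "alice"
     :paths {:/iplant/home/alice/results.csv
             "https://de.example.org/anon-files/iplant/home/alice/results.csv"}}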
--- .../terrain/src/terrain/clients/data_info.clj | 5 ++++ .../src/terrain/clients/data_info/raw.clj | 8 ++++++ .../terrain/src/terrain/routes/filesystem.clj | 2 +- .../terrain/services/filesystem/sharing.clj | 27 ------------------- 4 files changed, 14 insertions(+), 28 deletions(-) diff --git a/services/terrain/src/terrain/clients/data_info.clj b/services/terrain/src/terrain/clients/data_info.clj index e03b55082..b83d7182d 100644 --- a/services/terrain/src/terrain/clients/data_info.clj +++ b/services/terrain/src/terrain/clients/data_info.clj @@ -228,6 +228,11 @@ (let [path-uuid (uuid-for-path (:user params) (:path body))] (raw/set-file-type (:user params) path-uuid (:type body)))) +(defn share-with-anonymous + "Uses the data-info anonymizer endpoint to share paths with the anonymous user." + [params body] + (raw/share-with-anonymous (:user params) (:paths body))) + (defn gen-output-dir "Either obtains or creates a default output directory using a specified base name." [base] diff --git a/services/terrain/src/terrain/clients/data_info/raw.clj b/services/terrain/src/terrain/clients/data_info/raw.clj index 262164d30..072a40007 100644 --- a/services/terrain/src/terrain/clients/data_info/raw.clj +++ b/services/terrain/src/terrain/clients/data_info/raw.clj @@ -221,6 +221,14 @@ (request :post ["data" path-uuid "metadata" "save"] (mk-req-map user (json/encode {:dest dest :recursive recursive})))) +;; SHARING + +(defn share-with-anonymous + "Share a list of paths with the anonymous user." + [user paths] + (request :post ["anonymizer"] + (mk-req-map user (json/encode {:paths paths})))) + ;; MISC (defn collect-permissions diff --git a/services/terrain/src/terrain/routes/filesystem.clj b/services/terrain/src/terrain/routes/filesystem.clj index 8c9800c9b..9c81eec03 100644 --- a/services/terrain/src/terrain/routes/filesystem.clj +++ b/services/terrain/src/terrain/routes/filesystem.clj @@ -90,7 +90,7 @@ (controller req data/read-tabular-chunk :params :body)) (POST "/filesystem/anon-files" [:as req] - (controller req sharing/do-anon-files :params :body)))) + (controller req data/share-with-anonymous :params :body)))) (defn secured-filesystem-metadata-routes "The routes for file metadata endpoints." 
diff --git a/services/terrain/src/terrain/services/filesystem/sharing.clj b/services/terrain/src/terrain/services/filesystem/sharing.clj index 5ca5d48c3..0dec5b252 100644 --- a/services/terrain/src/terrain/services/filesystem/sharing.clj +++ b/services/terrain/src/terrain/services/filesystem/sharing.clj @@ -172,30 +172,3 @@ [p] (let [aurl (url/url (cfg/anon-files-base))] (str (-> aurl (assoc :path (ft/path-join (:path aurl) (string/replace p #"^\/" ""))))))) - -(defn anon-files-urls - [paths] - (into {} (map #(vector %1 (anon-file-url %1)) paths))) - -(defn anon-files - [user paths] - (with-jargon (icat/jargon-cfg) [cm] - (validators/user-exists cm user) - (validators/all-paths-exist cm paths) - (validators/paths-are-files cm paths) - (validators/user-owns-paths cm user paths) - (log/warn "Giving read access to" (cfg/fs-anon-user) "on:" (string/join " " paths)) - (share user [(cfg/fs-anon-user)] paths :read) - {:user user :paths (anon-files-urls paths)})) - -(defn fix-broken-paths - [paths] - (mapv #(string/replace % #"\/$" "") paths)) - -(defn do-anon-files - [params body] - (paths/log-call "do-anon-files" params body) - (validate-map params {:user string?}) - (validate-map body {:paths sequential?}) - (validators/validate-num-paths (:paths body)) - (anon-files (:user params) (fix-broken-paths (:paths body)))) From dd006ef85d27832bc93820f923880aa02982141d Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Fri, 5 Feb 2016 13:58:46 -0700 Subject: [PATCH 007/183] Use swap! rather than reset! for atom updates in data-info.services.trash. --- services/data-info/src/data_info/services/trash.clj | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/services/data-info/src/data_info/services/trash.clj b/services/data-info/src/data_info/services/trash.clj index ed00931a4..1b5b81d24 100644 --- a/services/data-info/src/data_info/services/trash.clj +++ b/services/data-info/src/data_info/services/trash.clj @@ -72,8 +72,7 @@ ;;; otherwise, do a hard delete. (if-not (.startsWith p (paths/user-trash-path user)) (do (let [trash-path (move-to-trash cm p user)] - (reset! trash-paths - (assoc @trash-paths p trash-path)))) + (swap! trash-paths assoc p trash-path))) (delete cm p true))) ;;; Force a delete to bypass proxy user's trash. {:paths paths @@ -206,9 +205,8 @@ (move cm path fully-restored :user user :admin-users (cfg/irods-admins)) (log/warn "Done moving " path " to " fully-restored) - (reset! retval - (assoc @retval path {:restored-path fully-restored - :partial-restore restored-to-homedir})))) + (swap! retval assoc path {:restored-path fully-restored + :partial-restore restored-to-homedir}))) {:restored @retval})) {:restored {}})))) From 91572bc1389114d477f9a72d2b934ff119585c09 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 8 Feb 2016 12:55:44 -0700 Subject: [PATCH 008/183] Revert force of lein 2.5.3 given 2.6.1 release. This reverts commit e645cada4488290f7bc69d594845908a3937fe99. --- docker/de-backend-buildenv/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/de-backend-buildenv/Dockerfile b/docker/de-backend-buildenv/Dockerfile index 3c9572e43..b3dc88c67 100644 --- a/docker/de-backend-buildenv/Dockerfile +++ b/docker/de-backend-buildenv/Dockerfile @@ -44,7 +44,7 @@ ENV LEIN_ROOT 1 ENV PATH /bin:/usr/bin:/usr/local/bin:/sbin/:/usr/sbin:/opt/go/bin:/opt/gopath/bin:/opt/maven/bin:/opt/nodejs/bin RUN go get github.com/tools/godep RUN go get github.com/constabulary/gb/... 
-ADD https://raw.githubusercontent.com/technomancy/leiningen/2.5.3/bin/lein /usr/bin/lein +ADD https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein /usr/bin/lein ADD build_profiles.clj /root/.lein/profiles.clj RUN chmod a+x /usr/bin/lein From 9373eaf9b008bd1725dd95f0a33e9af810a2ca95 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 9 Feb 2016 12:33:30 -0700 Subject: [PATCH 009/183] Switch the base of jex-events' docker image to use jeanblanchard/alpine-glibc. --- services/jex-events/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/jex-events/Dockerfile b/services/jex-events/Dockerfile index 26783a560..b5e9bbf9f 100644 --- a/services/jex-events/Dockerfile +++ b/services/jex-events/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:14.04 +FROM jeanblanchard/alpine-glibc ADD bin/jex-events /bin/ From 5f4376dd35554805489c914c280ece9af7d8cb7b Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 8 Feb 2016 12:39:02 -0700 Subject: [PATCH 010/183] CORE-7479 Change DE window header text to bold for readability --- .../theme/base/client/desktop/window/IplantWindowStyles.css | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/window/IplantWindowStyles.css b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/window/IplantWindowStyles.css index d4203ed9e..62551ab84 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/window/IplantWindowStyles.css +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/window/IplantWindowStyles.css @@ -1,6 +1,6 @@ @font-face { - font-family: TextaRegular; - src: url(../Texta_Font/Texta-Regular.otf); + font-family: TextaBold; + src: url(../Texta_Font/Texta-Bold.otf); } @sprite .restoreBtn { @@ -67,7 +67,7 @@ overflow: hidden; white-space: nowrap; text-overflow: ellipsis; - font-family: TextaRegular, Univers, Calibri, "Gill Sans", "Gill Sans MT", "Myriad Pro", + font-family: TextaBold, Univers, Calibri, "Gill Sans", "Gill Sans MT", "Myriad Pro", Myriad, "DejaVu Sans Condensed", "Liberation Sans", "Nimbus Sans L", Tahoma, Geneva, "Helvetica Neue", Helvetica, Arial, sans-serif !important; From 9ce2e7c10dde57c643838ba29f0fed1c20890246 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 10 Feb 2016 11:33:44 -0700 Subject: [PATCH 011/183] CORE-3484: added uniqueness constraints on the name and path columns of the genome_reference table --- .../src/main/constraints/46_genome_ref.sql | 15 +++++++++++++++ .../conversions/v2.5.0/c250_2016021001.clj | 19 +++++++++++++++++++ .../src/main/data/99_version.sql | 1 + libs/kameleon/project.clj | 2 +- 4 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 databases/de-database-schema/src/main/constraints/46_genome_ref.sql create mode 100644 databases/de-database-schema/src/main/conversions/v2.5.0/c250_2016021001.clj diff --git a/databases/de-database-schema/src/main/constraints/46_genome_ref.sql b/databases/de-database-schema/src/main/constraints/46_genome_ref.sql new file mode 100644 index 000000000..857694117 --- /dev/null +++ b/databases/de-database-schema/src/main/constraints/46_genome_ref.sql @@ -0,0 +1,15 @@ +SET search_path = public, pg_catalog; + +-- +-- Uniqueness constraint on reference genome name. +-- +ALTER TABLE ONLY genome_reference +ADD CONSTRAINT genome_ref_mame_unique +UNIQUE (name); + +-- +-- Uniqueness constraint on reference genome path. 
+--
+ALTER TABLE ONLY genome_reference
+ADD CONSTRAINT genome_ref_path_unique
+UNIQUE (path);
diff --git a/databases/de-database-schema/src/main/conversions/v2.5.0/c250_2016021001.clj b/databases/de-database-schema/src/main/conversions/v2.5.0/c250_2016021001.clj
new file mode 100644
index 000000000..b4507a936
--- /dev/null
+++ b/databases/de-database-schema/src/main/conversions/v2.5.0/c250_2016021001.clj
@@ -0,0 +1,19 @@
+(ns facepalm.c250-2016021001
+  (:use [korma.core]
+        [kameleon.sql-reader :only [load-sql-file]]))
+
+(def ^:private version
+  "The destination database version."
+  "2.5.0:20160210.01")
+
+(defn- add-reference-genome-uniqueness-constraints
+  "Adds uniqueness constraints to the name and path columns of the genome_reference table."
+  []
+  (println "\t* Adding uniqueness constraints to the genome_reference table")
+  (load-sql-file "constraints/46_genome_ref.sql"))
+
+(defn convert
+  "Performs the conversion for this database version"
+  []
+  (println "Performing the conversion for" version)
+  (add-reference-genome-uniqueness-constraints))
diff --git a/databases/de-database-schema/src/main/data/99_version.sql b/databases/de-database-schema/src/main/data/99_version.sql
index d1cb9ecf3..87e0614c8 100644
--- a/databases/de-database-schema/src/main/data/99_version.sql
+++ b/databases/de-database-schema/src/main/data/99_version.sql
@@ -70,3 +70,4 @@ INSERT INTO version (version) VALUES ('2.1.0:20150901.01');
 INSERT INTO version (version) VALUES ('2.2.0:20151005.01');
 INSERT INTO version (version) VALUES ('2.3.0:20151110.01');
 INSERT INTO version (version) VALUES ('2.4.0:20160106.01');
+INSERT INTO version (version) VALUES ('2.5.0:20160210.01');
diff --git a/libs/kameleon/project.clj b/libs/kameleon/project.clj
index bc633cc48..ab69c820f 100644
--- a/libs/kameleon/project.clj
+++ b/libs/kameleon/project.clj
@@ -14,4 +14,4 @@
                  [slingshot "0.12.2"]]
   :plugins [[lein-marginalia "0.7.1"]
             [test2junit "1.1.3"]]
-  :manifest {"db-version" "2.4.0:20160106.01"})
+  :manifest {"db-version" "2.5.0:20160210.01"})

From 6c27115bbc395d58d1b1a6a21fde7d66eab0e743 Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Wed, 10 Feb 2016 13:40:30 -0700
Subject: [PATCH 012/183] CORE-3485: added a check for duplicate reference
 genome paths

---
 .../src/clojure_commons/exception_util.clj |  5 +++
 .../src/apps/metadata/reference_genomes.clj | 36 +++++++++++++------
 2 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/libs/iplant-clojure-commons/src/clojure_commons/exception_util.clj b/libs/iplant-clojure-commons/src/clojure_commons/exception_util.clj
index 9fc7265f7..e55bc4953 100644
--- a/libs/iplant-clojure-commons/src/clojure_commons/exception_util.clj
+++ b/libs/iplant-clojure-commons/src/clojure_commons/exception_util.clj
@@ -11,3 +11,8 @@
   "Throws an error indicating that the request is forbidden."
   [reason & {:as ex-info}]
   (throw+ (assoc ex-info :type ::cx/forbidden :error reason)))
+
+(defn exists
+  "Throws an error indicating that there was an attempt to create something that already exists."
+ [reason & {:as ex-info}] + (throw+ (assoc ex-info :type ::cx/exists :error reason))) diff --git a/services/apps/src/apps/metadata/reference_genomes.clj b/services/apps/src/apps/metadata/reference_genomes.clj index 1aa3ec7d7..317fa04cf 100644 --- a/services/apps/src/apps/metadata/reference_genomes.clj +++ b/services/apps/src/apps/metadata/reference_genomes.clj @@ -10,6 +10,7 @@ [apps.util.conversions :only [date->timestamp]] [slingshot.slingshot :only [throw+]]) (:require [clojure.tools.logging :as log] + [clojure-commons.exception-util :as cxu] [korma.core :as sql])) (defn- reference-genome-base-query @@ -22,6 +23,13 @@ (join created_by) (join last_modified_by))) +(defn- get-reference-genomes-where + "A convenience function to look up reference genomes that satisfy a simple set of conditions." + [conditions] + (-> (reference-genome-base-query) + (where conditions) + select)) + (defn get-reference-genomes "Lists all of the reference genomes in the database." [{:keys [deleted created_by]}] @@ -83,18 +91,26 @@ (sql/update genome_reference (set-fields update-values) (where {:id reference-genome-id})) (get-reference-genome reference-genome-id))) +(defn- validate-reference-genome-path + "Verifies that a reference genome with the same path doesn't already exist." + [path] + (if (seq (get-reference-genomes-where {:path path})) + (cxu/exists "A reference genome with the given path already exists." :path path))) + (defn add-reference-genome "Adds a reference genome with the given name and path." - [reference-genome] - (let [user-id (get-user-id (:username current-user)) - insert-values (-> reference-genome - (select-keys [:name :path]) - (assoc :created_by user-id - :last_modified_by user-id - :created_on (sqlfn now) - :last_modified_on (sqlfn now))) - reference-genome-id (:id (insert genome_reference (values insert-values)))] - (get-reference-genome reference-genome-id))) + [{:keys [name path] :as reference-genome}] + (let [user-id (get-user-id (:username current-user))] + (validate-reference-genome-path path) + (-> (insert genome_reference + (values {:name name + :path path + :created_by user-id + :last_modified_by user-id + :created_on (sqlfn now) + :last_modified_on (sqlfn now)})) + :id + get-reference-genome))) (def ^:private valid-insert-fields [:id :name :path :deleted :created_by :created_on :last_modified_by :last_modified_on]) From cbbe35b4354674a73b11a29fa719792be491cf93 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 10 Feb 2016 14:30:08 -0700 Subject: [PATCH 013/183] CORE-3484: added a duplicate name check to the POST /admin/reference-genomes endpoint --- services/apps/src/apps/metadata/reference_genomes.clj | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/services/apps/src/apps/metadata/reference_genomes.clj b/services/apps/src/apps/metadata/reference_genomes.clj index 317fa04cf..3fd799d21 100644 --- a/services/apps/src/apps/metadata/reference_genomes.clj +++ b/services/apps/src/apps/metadata/reference_genomes.clj @@ -97,11 +97,18 @@ (if (seq (get-reference-genomes-where {:path path})) (cxu/exists "A reference genome with the given path already exists." :path path))) +(defn- validate-reference-genome-name + "Verifies that a reference genome with the same name doesn't already exist." + [name] + (if (seq (get-reference-genomes-where {:name name})) + (cxu/exists "A reference genome with the given name already exists." :name name))) + (defn add-reference-genome "Adds a reference genome with the given name and path." 
[{:keys [name path] :as reference-genome}] (let [user-id (get-user-id (:username current-user))] (validate-reference-genome-path path) + (validate-reference-genome-name name) (-> (insert genome_reference (values {:name name :path path From 241f8d6fb1ffdca66e301ed6a1f445b1b9b1c431 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 10 Feb 2016 15:24:20 -0700 Subject: [PATCH 014/183] CORE-3484: fixed a typo in a database constraint name --- .../de-database-schema/src/main/constraints/46_genome_ref.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/databases/de-database-schema/src/main/constraints/46_genome_ref.sql b/databases/de-database-schema/src/main/constraints/46_genome_ref.sql index 857694117..9936d9050 100644 --- a/databases/de-database-schema/src/main/constraints/46_genome_ref.sql +++ b/databases/de-database-schema/src/main/constraints/46_genome_ref.sql @@ -4,7 +4,7 @@ SET search_path = public, pg_catalog; -- Uniqueness constraint on reference genome name. -- ALTER TABLE ONLY genome_reference -ADD CONSTRAINT genome_ref_mame_unique +ADD CONSTRAINT genome_ref_name_unique UNIQUE (name); -- From f5c0a22b12f2ad6267ee6bca6050e7876b26ac1b Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 10 Feb 2016 16:49:24 -0700 Subject: [PATCH 015/183] CORE-3484, CORE-3485: add reference genome path and name validation to PATCH /admin/reference-genomes/:reference-genome-id as well --- .../src/apps/metadata/reference_genomes.clj | 46 +++++++++++-------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/services/apps/src/apps/metadata/reference_genomes.clj b/services/apps/src/apps/metadata/reference_genomes.clj index 3fd799d21..8b911c0cf 100644 --- a/services/apps/src/apps/metadata/reference_genomes.clj +++ b/services/apps/src/apps/metadata/reference_genomes.clj @@ -68,6 +68,24 @@ (assert-not-nil [:reference-genome-id reference-genome-id] (first (get-reference-genomes-by-id reference-genome-id)))) +(defn- validate-reference-genome-path + "Verifies that a reference genome with the same path doesn't already exist." + ([path id] + (if (seq (get-reference-genomes-where {:path path :id [not= id]})) + (cxu/exists "Another reference genome with the given path already exists." :path path))) + ([path] + (if (seq (get-reference-genomes-where {:path path})) + (cxu/exists "A reference genome with the given path already exists." :path path)))) + +(defn- validate-reference-genome-name + "Verifies that a reference genome with the same name doesn't already exist." + ([name id] + (if (seq (get-reference-genomes-where {:name name :id [not= id]})) + (cxu/exists "Another reference genome with the given name already exists." :name name))) + ([name] + (if (seq (get-reference-genomes-where {:name name})) + (cxu/exists "A reference genome with the given name already exists." :name name)))) + (defn get-reference-genome "Gets a reference genome by its ID." [reference-genome-id] @@ -82,26 +100,16 @@ (defn update-reference-genome "Updates the name, path, and deleted flag of a reference genome." 
-  [{reference-genome-id :id :as reference-genome}]
+  [{reference-genome-id :id :keys [name path] :as reference-genome}]
   (get-valid-reference-genome reference-genome-id)
-  (let [update-values (-> reference-genome
-                          (select-keys [:name :path :deleted])
-                          (assoc :last_modified_by (get-user-id (:username current-user))
-                                 :last_modified_on (sqlfn now)))]
-    (sql/update genome_reference (set-fields update-values) (where {:id reference-genome-id}))
-    (get-reference-genome reference-genome-id)))
-
-(defn- validate-reference-genome-path
-  "Verifies that a reference genome with the same path doesn't already exist."
-  [path]
-  (if (seq (get-reference-genomes-where {:path path}))
-    (cxu/exists "A reference genome with the given path already exists." :path path)))
-
-(defn- validate-reference-genome-name
-  "Verifies that a reference genome with the same name doesn't already exist."
-  [name]
-  (if (seq (get-reference-genomes-where {:name name}))
-    (cxu/exists "A reference genome with the given name already exists." :name name)))
+  (validate-reference-genome-path path reference-genome-id)
+  (validate-reference-genome-name name reference-genome-id)
+  (sql/update genome_reference
+    (set-fields (assoc (select-keys reference-genome [:name :path :deleted])
+                       :last_modified_by (get-user-id (:username current-user))
+                       :last_modified_on (sqlfn now)))
+    (where {:id reference-genome-id}))
+  (get-reference-genome reference-genome-id))

 (defn add-reference-genome
   "Adds a reference genome with the given name and path."

From 1dd093ca28b3d1a3feed67c0f1c2eb2610d7f0db Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Wed, 10 Feb 2016 17:43:20 -0700
Subject: [PATCH 016/183] CORE-7386: remove support for the `public` query
 parameter from GET /admin/apps/categories

---
 services/apps/src/apps/routes/admin.clj             | 2 +-
 services/apps/src/apps/service/apps/de/listings.clj | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/services/apps/src/apps/routes/admin.clj b/services/apps/src/apps/routes/admin.clj
index 183685ec6..5604fa18d 100644
--- a/services/apps/src/apps/routes/admin.clj
+++ b/services/apps/src/apps/routes/admin.clj
@@ -108,7 +108,7 @@
 (defroutes* admin-categories
   (GET* "/" []
-    :query [params CategoryListingParams]
+    :query [params SecuredQueryParams]
     :return AppCategoryListing
     :summary "List App Categories"
     :description "This service is used by DE admins to obtain a list of public app categories along
diff --git a/services/apps/src/apps/service/apps/de/listings.clj b/services/apps/src/apps/service/apps/de/listings.clj
index 788567595..64fda7e4a 100644
--- a/services/apps/src/apps/service/apps/de/listings.clj
+++ b/services/apps/src/apps/service/apps/de/listings.clj
@@ -154,7 +154,7 @@
   "Retrieves the list of app groups that are accessible to administrators. This includes all
    public app groups along with the trash group."
   [user params]
-  (let [params (assoc params :admin true)]
+  (let [params (assoc params :admin true :public true)]
     (conj (vec (get-app-groups user params))
           (format-trash-category nil nil params))))

From 3538e80e4beb9ca6b73e57b78c769aae762d2223 Mon Sep 17 00:00:00 2001
From: Paul Sarando
Date: Thu, 11 Feb 2016 11:56:12 -0700
Subject: [PATCH 017/183] CORE-7511 Add more user info to PermID Request
 details.

Updated the responses of Permanent ID Request endpoints that return a
request's details to include more user info in the 'requested_by' field,
such as email and first/last name.
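For illustration, the shape of the details response changes roughly as
follows. Only the :username and :email keys are confirmed by the
notification code in this patch; the remaining field names produced by
format-like-trellis are assumptions:

    ;; before: requested_by is a bare username
    {:requested_by "ipcdev", ...}

    ;; after: requested_by carries the subject info looked up through
    ;; iplant-groups (illustrative field names)
    {:requested_by {:username "ipcdev"
                    :email    "ipcdev@example.org"
                    :name     "Ipc Dev"}, ...}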
Since the requesting user's email is now available when notifications are sent to that user, notification emails can now be enabled. Added the new ID to the "Completion" notification update in the "comments" field, which is now included as the body of the "Completion" notification email. --- .../services/permanent_id_requests.clj | 53 ++++++++++++++----- 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/services/terrain/src/terrain/services/permanent_id_requests.clj b/services/terrain/src/terrain/services/permanent_id_requests.clj index 7b5c9eb06..bfbbfa8a8 100644 --- a/services/terrain/src/terrain/services/permanent_id_requests.clj +++ b/services/terrain/src/terrain/services/permanent_id_requests.clj @@ -10,6 +10,7 @@ [terrain.clients.data-info :as data-info] [terrain.clients.data-info.raw :as data-info-client] [terrain.clients.ezid :as ezid] + [terrain.clients.iplant-groups :as groups] [terrain.clients.metadata.raw :as metadata] [terrain.clients.notifications :as notifications] [terrain.util.config :as config] @@ -176,24 +177,29 @@ (metadata/add-metadata-template-avus id data-type template-id publish-avus))) (defn- send-notification - [user subject request-id] + [user email subject contents request-id] (log/debug "sending permanent_id_request notification to" user ":" subject) (try (notifications/send-notification {:type "permanent_id_request" :user user :subject subject - :payload {:uuid request-id}}) + :email true + :email_template "blank" + :payload {:email_address email + :contents contents + :uuid request-id}}) (catch Exception e (log/error e "Could not send permanent_id_request (" request-id ") notification to" user ":" subject)))) (defn- send-update-notification - [{:keys [id type folder history requested_by] :or {folder {:path "unknown"}}}] - (send-notification - requested_by - (str type " Request for " (ft/basename (:path folder)) " Status Changed to " (:status (last history))) - id)) + [{{:keys [username email]} :requested_by + :keys [id type folder history] + :or {folder {:path "unknown"}}}] + (let [{:keys [status comments]} (last history) + subject (str type " Request for " (ft/basename (:path folder)) " Status Changed to " status)] + (send-notification username email subject comments id))) (defn- request-type->shoulder [type] @@ -263,6 +269,19 @@ (dissoc :target_id :target_type :original_path) (assoc :folder (data-info/stat-by-uuid user (uuidify target_id))))) +(defn- format-requested-by + [user {:keys [requested_by target_id] :as permanent-id-request}] + (let [user-info (groups/lookup-subject user requested_by)] + (if user-info + (assoc permanent-id-request :requested_by (groups/format-like-trellis user-info)) + permanent-id-request))) + +(defn- format-permanent-id-request-details + [user permanent-id-request] + (->> permanent-id-request + (format-perm-id-req-response user) + (format-requested-by user))) + (defn- format-perm-id-req-list [requests] (map @@ -273,7 +292,7 @@ [params] (-> (metadata/list-permanent-id-requests params) parse-service-json - (update-in [:requests] format-perm-id-req-list))) + (update :requests format-perm-id-req-list))) (defn create-permanent-id-request [params body] @@ -285,10 +304,15 @@ target-type (validate-request-target-type folder) {request-id :id :as response} (submit-permanent-id-request type folder-id target-type path) staged-path (stage-data-item user folder)] - (send-notification user (str type " Request Submitted for " (ft/basename path)) request-id) + (send-notification + user + (:email current-user) + (str type " Request 
Submitted for " (ft/basename path)) + nil + request-id) (email/send-permanent-id-request-new type staged-path current-user) (email/send-permanent-id-request-submitted type staged-path current-user) - (format-perm-id-req-response user response))) + (format-permanent-id-request-details user response))) (defn list-permanent-id-request-status-codes [params] @@ -302,25 +326,25 @@ [request-id params] (->> (metadata/get-permanent-id-request request-id) parse-service-json - (format-perm-id-req-response (:shortUsername current-user)))) + (format-permanent-id-request-details (:shortUsername current-user)))) (defn admin-list-permanent-id-requests [params] (-> (metadata/admin-list-permanent-id-requests params) parse-service-json - (update-in [:requests] format-perm-id-req-list))) + (update :requests format-perm-id-req-list))) (defn admin-get-permanent-id-request [request-id params] (->> (metadata/admin-get-permanent-id-request request-id) parse-service-json - (format-perm-id-req-response (:shortUsername current-user)))) + (format-permanent-id-request-details (:shortUsername current-user)))) (defn update-permanent-id-request [request-id params body] (let [response (->> (metadata/update-permanent-id-request request-id body) parse-service-json - (format-perm-id-req-response (:shortUsername current-user)))] + (format-permanent-id-request-details (:shortUsername current-user)))] (send-update-notification response) response)) @@ -355,4 +379,5 @@ (let [identifier (complete-permanent-id-request (:shortUsername current-user) (admin-get-permanent-id-request request-id nil))] (update-permanent-id-request request-id nil (json/encode {:status status-code-completion + :comments identifier :permanent_id identifier})))) From 357e7988ccf3a6fabc7030c74c798c3783413d37 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Thu, 11 Feb 2016 11:57:54 -0700 Subject: [PATCH 018/183] CORE-6943 modify Email notifications options wordings to include URL import notification emails. --- .../client/desktop/DesktopContextualHelpMessages.properties | 2 +- .../de/theme/base/client/desktop/DesktopMessages.properties | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopContextualHelpMessages.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopContextualHelpMessages.properties index 9452d599a..0f060308e 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopContextualHelpMessages.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopContextualHelpMessages.properties @@ -1,7 +1,7 @@ iconHomepageDataTip = Store, manage, and share your data here. iconHomepageAnalysesTip = Find the status, parameters, and results of your executed apps. iconHomepageAppsTip = Discover, create, and use scientific apps for your data. -notifyEmailHelp =

This option will send you an email when your analysis is complete or has failed. It will be sent to the email address you used to register for your CyVerse account.


This option is helpful if you would like to track the status of your analysis while outside of the Discovery Environment.

+notifyEmailHelp =

This option will send you an email when your analysis or URL import is complete or has failed. It will be sent to the email address you used to register for your CyVerse account.


This option is helpful if you would like to track the status of your analysis while outside of the Discovery Environment.

rememberFileSelectorPathHelp =

This option allows the Data Manager to automatically navigate the file tree to the file location that was used the last time you ran an app.

This option is helpful if you have an extensive file tree or often use the same input file location.

saveSessionHelp =

Enabling the Save session option will restore your Discovery Environment desktop to exactly how you left it when you last logged out.

defaultOutputFolderHelp =

This will be the default location where all outputs from your analyses can be found.

You can keep the default path or click Browse to select a new location.

diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.properties index 29473119a..4e0be1843 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.properties @@ -4,7 +4,7 @@ about = About forums = Forums duplicateShortCutKey = The following keyboard shortcut is already in use: {0}. Please choose another shortcut. preferences = Preferences -notifyEmail = Notify me by email when my analysis status changes. +notifyEmail = Notify me by email when my analysis
or URL import status changes. rememberFileSectorPath = Remember last file path for Apps. restoreDefaults = Restore Defaults saveSession = Save Session From b15291931a506d772cc3608cb4286483021f395b Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Thu, 11 Feb 2016 13:35:04 -0700 Subject: [PATCH 019/183] CORE-7511 Minor refactor. Includes removing unused 'params' arguments from some terrain.services.permanent-id-requests functions. --- .../terrain/routes/permanent_id_requests.clj | 28 ++++++++--------- .../services/permanent_id_requests.clj | 31 +++++++++---------- 2 files changed, 29 insertions(+), 30 deletions(-) diff --git a/services/terrain/src/terrain/routes/permanent_id_requests.clj b/services/terrain/src/terrain/routes/permanent_id_requests.clj index daf7ba119..0a3e18c58 100644 --- a/services/terrain/src/terrain/routes/permanent_id_requests.clj +++ b/services/terrain/src/terrain/routes/permanent_id_requests.clj @@ -14,17 +14,17 @@ (GET "/permanent-id-requests" [:as {params :params}] (service/success-response (list-permanent-id-requests params))) - (POST "/permanent-id-requests" [:as {:keys [params body]}] - (service/success-response (create-permanent-id-request params body))) + (POST "/permanent-id-requests" [:as {:keys [body]}] + (service/success-response (create-permanent-id-request body))) - (GET "/permanent-id-requests/status-codes" [:as {params :params}] - (service/success-response (list-permanent-id-request-status-codes params))) + (GET "/permanent-id-requests/status-codes" [] + (service/success-response (list-permanent-id-request-status-codes))) - (GET "/permanent-id-requests/types" [:as {params :params}] - (service/success-response (list-permanent-id-request-types params))) + (GET "/permanent-id-requests/types" [] + (service/success-response (list-permanent-id-request-types))) - (GET "/permanent-id-requests/:request-id" [request-id :as {params :params}] - (service/success-response (get-permanent-id-request request-id params))))) + (GET "/permanent-id-requests/:request-id" [request-id] + (service/success-response (get-permanent-id-request request-id))))) (defn admin-permanent-id-request-routes "The admin routes for Permanent ID Request endpoints." 
@@ -36,11 +36,11 @@ (GET "/permanent-id-requests" [:as {params :params}] (service/success-response (admin-list-permanent-id-requests params))) - (GET "/permanent-id-requests/:request-id" [request-id :as {params :params}] - (service/success-response (admin-get-permanent-id-request request-id params))) + (GET "/permanent-id-requests/:request-id" [request-id] + (service/success-response (admin-get-permanent-id-request request-id))) - (POST "/permanent-id-requests/:request-id/ezid" [request-id :as {:keys [params body]}] - (service/success-response (create-permanent-id request-id params body))) + (POST "/permanent-id-requests/:request-id/ezid" [request-id :as {:keys [body]}] + (service/success-response (create-permanent-id request-id body))) - (POST "/permanent-id-requests/:request-id/status" [request-id :as {:keys [params body]}] - (service/success-response (update-permanent-id-request request-id params body))))) + (POST "/permanent-id-requests/:request-id/status" [request-id :as {:keys [body]}] + (service/success-response (update-permanent-id-request request-id body))))) diff --git a/services/terrain/src/terrain/services/permanent_id_requests.clj b/services/terrain/src/terrain/services/permanent_id_requests.clj index bfbbfa8a8..1a5db21aa 100644 --- a/services/terrain/src/terrain/services/permanent_id_requests.clj +++ b/services/terrain/src/terrain/services/permanent_id_requests.clj @@ -271,10 +271,9 @@ (defn- format-requested-by [user {:keys [requested_by target_id] :as permanent-id-request}] - (let [user-info (groups/lookup-subject user requested_by)] - (if user-info - (assoc permanent-id-request :requested_by (groups/format-like-trellis user-info)) - permanent-id-request))) + (if-let [user-info (groups/lookup-subject user requested_by)] + (assoc permanent-id-request :requested_by (groups/format-like-trellis user-info)) + permanent-id-request)) (defn- format-permanent-id-request-details [user permanent-id-request] @@ -295,7 +294,7 @@ (update :requests format-perm-id-req-list))) (defn create-permanent-id-request - [params body] + [body] (create-staging-dir) (let [{type :type folder-id :folder} (service/decode-json body) folder-id (uuidify folder-id) @@ -315,15 +314,15 @@ (format-permanent-id-request-details user response))) (defn list-permanent-id-request-status-codes - [params] + [] (metadata/list-permanent-id-request-status-codes)) (defn list-permanent-id-request-types - [params] + [] (metadata/list-permanent-id-request-types)) (defn get-permanent-id-request - [request-id params] + [request-id] (->> (metadata/get-permanent-id-request request-id) parse-service-json (format-permanent-id-request-details (:shortUsername current-user)))) @@ -335,13 +334,13 @@ (update :requests format-perm-id-req-list))) (defn admin-get-permanent-id-request - [request-id params] + [request-id] (->> (metadata/admin-get-permanent-id-request request-id) parse-service-json (format-permanent-id-request-details (:shortUsername current-user)))) (defn update-permanent-id-request - [request-id params body] + [request-id body] (let [response (->> (metadata/update-permanent-id-request request-id body) parse-service-json (format-permanent-id-request-details (:shortUsername current-user)))] @@ -370,14 +369,14 @@ identifier) (catch Object e (log/error e) - (update-permanent-id-request request-id nil (json/encode {:status status-code-failed})) + (update-permanent-id-request request-id (json/encode {:status status-code-failed})) (throw+ e)))) (defn create-permanent-id - [request-id params body] + [request-id body] 
(create-publish-dir) (let [identifier (complete-permanent-id-request (:shortUsername current-user) - (admin-get-permanent-id-request request-id nil))] - (update-permanent-id-request request-id nil (json/encode {:status status-code-completion - :comments identifier - :permanent_id identifier})))) + (admin-get-permanent-id-request request-id))] + (update-permanent-id-request request-id (json/encode {:status status-code-completion + :comments identifier + :permanent_id identifier})))) From fa7d575511757239ba4340c05c15f423910bb1e1 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Thu, 11 Feb 2016 14:48:54 -0700 Subject: [PATCH 020/183] CORE-6943 UI for enabling/disabling URL import email notifications. --- .../presenter/AppLaunchPresenterImpl.java | 2 +- .../de/client/models/UserSettings.java | 43 +++++++++++++------ .../views/widgets/PreferencesDialog.java | 18 +++++--- .../views/widgets/PreferencesView.ui.xml | 15 +++++-- .../iplantc/de/desktop/shared/DeModule.java | 3 +- .../DesktopContextualHelpMessages.java | 3 ++ .../DesktopContextualHelpMessages.properties | 3 +- .../base/client/desktop/DesktopMessages.java | 7 ++- .../client/desktop/DesktopMessages.properties | 3 +- .../PreferencesViewDefaultAppearance.java | 14 +++++- 10 files changed, 81 insertions(+), 30 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/apps/widgets/client/presenter/AppLaunchPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/apps/widgets/client/presenter/AppLaunchPresenterImpl.java index 8bab79225..d99081d0d 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/apps/widgets/client/presenter/AppLaunchPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/apps/widgets/client/presenter/AppLaunchPresenterImpl.java @@ -77,7 +77,7 @@ public void go(final HasOneWidget container) { final AppTemplateAutoBeanFactory factory = GWT.create(AppTemplateAutoBeanFactory.class); final JobExecution je = factory.jobExecution().as(); je.setAppTemplateId(appTemplate.getId()); - je.setEmailNotificationEnabled(userSettings.isEnableEmailNotification()); + je.setEmailNotificationEnabled(userSettings.isEnableAnalysisEmailNotification()); // JDS Replace all Cmd Line restricted chars with underscores String regex = Format.substitute("[{0}]", RegExp.escapeCharacterClassSet(valConstants.restrictedCmdLineChars() diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/UserSettings.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/UserSettings.java index 7d78759b7..67f67254f 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/models/UserSettings.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/UserSettings.java @@ -22,7 +22,8 @@ public class UserSettings { private final KeyBoardShortcutConstants SHORTCUTS = GWT.create(KeyBoardShortcutConstants.class); - private boolean enableEmailNotification; + private boolean enableAnalysisEmailNotification; + private boolean enableImportEmailNotification; private String defaultFileSelectorPath; private boolean rememberLastPath; private boolean saveSession; @@ -36,7 +37,8 @@ public class UserSettings { private String lastPath; - public static final String EMAIL_NOTIFCATOIN = "enableEmailNotification"; + public static final String EMAIL_ANALYSIS_NOTIFCATOIN = "enableAnalysisEmailNotification"; + public static final String EMAIL_IMPORT_NOTIFICATION = "enableImportEmailNotification"; public static final String DEFAULT_FILE_SELECTOR_PATH = "defaultFileSelectorPath"; public static final String REMEMBER_LAST_PATH = "rememberLastPath"; public 
static final String SAVE_SESSION = "saveSession"; @@ -57,7 +59,7 @@ public UserSettings(final Splittable userSettingsSplit){ } private UserSettings() { - this.enableEmailNotification = false; + this.enableAnalysisEmailNotification = false; this.rememberLastPath = false; this.saveSession = true; } @@ -83,11 +85,17 @@ public void setValues(Splittable split) { return; } - if (split.get(EMAIL_NOTIFCATOIN) != null) { - setEnableEmailNotification(split.get(EMAIL_NOTIFCATOIN).asBoolean()); + if (split.get(EMAIL_ANALYSIS_NOTIFCATOIN) != null) { + setEnableAnalysisEmailNotification(split.get(EMAIL_ANALYSIS_NOTIFCATOIN).asBoolean()); } else { - setEnableEmailNotification(true); + setEnableAnalysisEmailNotification(true); } + if (split.get(EMAIL_IMPORT_NOTIFICATION) != null) { + setEnableAnalysisEmailNotification(split.get(EMAIL_IMPORT_NOTIFICATION).asBoolean()); + } else { + setEnableAnalysisEmailNotification(true); + } + if (split.get(DEFAULT_FILE_SELECTOR_PATH) != null) { setDefaultFileSelectorPath(split.get(DEFAULT_FILE_SELECTOR_PATH).asString()); } @@ -197,17 +205,17 @@ public String getNotifyShortCut() { return notifyShortCut; } /** - * @param enableEmailNotification the enableEmailNotification to set + * @param enableAnalysisEmailNotification the enableAnalysisEmailNotification to set */ - public void setEnableEmailNotification(boolean enableEmailNotification) { - this.enableEmailNotification = enableEmailNotification; + public void setEnableAnalysisEmailNotification(boolean enableAnalysisEmailNotification) { + this.enableAnalysisEmailNotification = enableAnalysisEmailNotification; } /** - * @return the enableEmailNotification + * @return the enableAnalysisEmailNotification */ - public boolean isEnableEmailNotification() { - return enableEmailNotification; + public boolean isEnableAnalysisEmailNotification() { + return enableAnalysisEmailNotification; } /** @@ -231,7 +239,8 @@ public String getDefaultFileSelectorPath() { */ public Splittable asSplittable() { Splittable ret = StringQuoter.createSplittable(); - StringQuoter.create(isEnableEmailNotification()).assign(ret, EMAIL_NOTIFCATOIN); + StringQuoter.create(isEnableAnalysisEmailNotification()).assign(ret, EMAIL_ANALYSIS_NOTIFCATOIN); + StringQuoter.create(isEnableImportEmailNotification()).assign(ret, EMAIL_IMPORT_NOTIFICATION); StringQuoter.create(getDefaultFileSelectorPath()).assign(ret, DEFAULT_FILE_SELECTOR_PATH); StringQuoter.create(isRememberLastPath()).assign(ret, REMEMBER_LAST_PATH); StringQuoter.create(isSaveSession()).assign(ret, SAVE_SESSION); @@ -340,4 +349,12 @@ public String getLastPath() { public void setLastPath(String lastPath) { this.lastPath = lastPath; } + + public boolean isEnableImportEmailNotification() { + return enableImportEmailNotification; + } + + public void setEnableImportEmailNotification(boolean enableImportEmailNotification) { + this.enableImportEmailNotification = enableImportEmailNotification; + } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/PreferencesDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/PreferencesDialog.java index 639964186..8c2ff53ca 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/PreferencesDialog.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/PreferencesDialog.java @@ -56,7 +56,9 @@ public interface PreferencesViewAppearance { String preferences(); - String notifyEmail(); + String notifyAnalysisEmail(); + + String notifyImportEmail(); String 
completeRequiredFieldsError(); @@ -87,6 +89,8 @@ public interface PreferencesViewAppearance { String closeActiveWindow(); String saveSessionHelp(); + + String notifyEmail(); } public interface HtmlLayoutContainerTemplate extends XTemplates { @@ -106,7 +110,8 @@ interface EditorDriver extends SimpleBeanEditorDriver event) { defaultsBtn.addSelectHandler(new SelectEvent.SelectHandler() { @Override public void onSelect(SelectEvent event) { - enableEmailNotification.setValue(true); + enableAnalysisEmailNotification.setValue(true); + enableImportEmailNotification.setValue(true); rememberLastPath.setValue(true); saveSession.setValue(true); appsShortCut.setValue(KB_CONSTANTS.appsKeyShortCut()); @@ -231,7 +237,8 @@ protected void onButtonPressed(TextButton button) { } else if (button == defaultsBtn) { - enableEmailNotification.setValue(true); + enableAnalysisEmailNotification.setValue(true); + enableImportEmailNotification.setValue(true); rememberLastPath.setValue(true); saveSession.setValue(true); appsShortCut.setValue(KB_CONSTANTS.appsKeyShortCut()); @@ -256,7 +263,8 @@ protected void onEnsureDebugId(String baseID) { getButton(PredefinedButton.CANCEL).ensureDebugId(baseID + DeModule.PreferenceIds.CANCEL); defaultsBtn.ensureDebugId(baseID + DeModule.PreferenceIds.DEFAULTS_BTN); - enableEmailNotification.ensureDebugId(baseID + DeModule.PreferenceIds.EMAIL_NOTIFICATION); + enableAnalysisEmailNotification.ensureDebugId(baseID + DeModule.PreferenceIds.EMAIL_ANALYSIS_NOTIFICATION); + enableImportEmailNotification.ensureDebugId(baseID + DeModule.PreferenceIds.EMAIL_IMPORT_NOTIFICATION); rememberLastPath.ensureDebugId(baseID + DeModule.PreferenceIds.REMEMBER_LAST_PATH); saveSession.ensureDebugId(baseID + DeModule.PreferenceIds.SAVE_SESSION); defaultOutputFolder.ensureDebugId(baseID + DeModule.PreferenceIds.DEFAULT_OUTPUT_FOLDER); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/PreferencesView.ui.xml b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/PreferencesView.ui.xml index b3e34a8aa..f56b50d3c 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/PreferencesView.ui.xml +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/PreferencesView.ui.xml @@ -31,7 +31,7 @@ - + - + + + + + + This option will send you an email when your analysis or URL import is complete or has failed. It will be sent to the email address you used to register for your CyVerse account.


This option is helpful if you would like to track the status of your analysis while outside of the Discovery Environment.

+notifyEmailHelp =

This option will send you an email when your analysis or URL import is complete or has failed. It will be sent to the email address you used to register for your CyVerse account.


This option is helpful if you would like to track the status of your analysis or URL import while outside of the Discovery Environment.

rememberFileSelectorPathHelp =

This option allows the Data Manager to automatically navigate the file tree to the file location that was used the last time you ran an app.

This option is helpful if you have an extensive file tree or often use the same input file location.

saveSessionHelp =

Enabling the Save session option will restore your Discovery Environment desktop to exactly how you left it when you last logged out.

defaultOutputFolderHelp =

This will be the default location where all outputs from your analyses can be found.

You can keep the default path or click Browse to select a new location.

+notifyEmail = Email me when analysis / URL import status changes. \ No newline at end of file diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.java index d5a243bc0..0fa7eb4ab 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.java @@ -51,8 +51,11 @@ public interface DesktopMessages extends Messages { @Key("newNotificationsAlert") String newNotificationsAlert(); - @Key("notifyEmail") - String notifyEmail(); + @Key("notifyAnalysisEmail") + String notifyAnalysisEmail(); + + @Key("notifyImportEmail") + String notifyImportEmail(); @Key("oneCharMax") String oneCharMax(); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.properties index 4e0be1843..2a9c852d5 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopMessages.properties @@ -4,7 +4,8 @@ about = About forums = Forums duplicateShortCutKey = The following keyboard shortcut is already in use: {0}. Please choose another shortcut. preferences = Preferences -notifyEmail = Notify me by email when my analysis
or URL import status changes. +notifyAnalysisEmail = Email me when my analysis status changes. +notifyImportEmail = Email me when my URL import status changes. rememberFileSectorPath = Remember last file path for Apps. restoreDefaults = Restore Defaults saveSession = Save Session diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/PreferencesViewDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/PreferencesViewDefaultAppearance.java index 7606daec6..1814d60ae 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/PreferencesViewDefaultAppearance.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/PreferencesViewDefaultAppearance.java @@ -53,8 +53,13 @@ public String preferences() { } @Override - public String notifyEmail() { - return desktopMessages.notifyEmail(); + public String notifyAnalysisEmail() { + return desktopMessages.notifyAnalysisEmail(); + } + + @Override + public String notifyImportEmail() { + return desktopMessages.notifyImportEmail(); } @Override @@ -131,4 +136,9 @@ public String closeActiveWindow() { public String saveSessionHelp() { return help.saveSessionHelp(); } + + @Override + public String notifyEmail() { + return help.notifyEmail(); + } } From 787b09f3a77f99552ee210962fd3ee03d309e348 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Thu, 11 Feb 2016 15:28:03 -0700 Subject: [PATCH 021/183] CORE-6943 fix to call the right setter method for enable email notification option for Import URL status. --- .../main/java/org/iplantc/de/client/models/UserSettings.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/UserSettings.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/UserSettings.java index 67f67254f..6aecdea0b 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/models/UserSettings.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/UserSettings.java @@ -91,9 +91,9 @@ public void setValues(Splittable split) { setEnableAnalysisEmailNotification(true); } if (split.get(EMAIL_IMPORT_NOTIFICATION) != null) { - setEnableAnalysisEmailNotification(split.get(EMAIL_IMPORT_NOTIFICATION).asBoolean()); + setEnableImportEmailNotification(split.get(EMAIL_IMPORT_NOTIFICATION).asBoolean()); } else { - setEnableAnalysisEmailNotification(true); + setEnableImportEmailNotification(true); } if (split.get(DEFAULT_FILE_SELECTOR_PATH) != null) { From c165c6df11af00f83df41d18a9b5a798acd9c167 Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Thu, 11 Feb 2016 18:04:09 -0700 Subject: [PATCH 022/183] CORE-6943 Update urlupload endpoint to lookup notify pref. 
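The fallback behavior of the preference lookup added here relies on
Clojure's keyword lookup with a not-found value: when get-prefs-safe
returns nil (the lookup failed) or the preference map simply lacks the key,
the default of true is used, so URL imports notify by email unless the user
has explicitly opted out. A quick REPL illustration:

    ;; preference explicitly set: honor it
    (:enableImportEmailNotification {:enableImportEmailNotification false} true)
    ;; => false

    ;; preference never set: default to notifying
    (:enableImportEmailNotification {:saveSession true} true)
    ;; => true

    ;; prefs lookup failed and get-prefs-safe returned nil: still default
    (:enableImportEmailNotification nil true)
    ;; => true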
--- .../terrain/src/terrain/clients/user_prefs.clj | 15 +++++++++++++-- .../terrain/services/metadata/internal_jobs.clj | 12 ++++++++++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/services/terrain/src/terrain/clients/user_prefs.clj b/services/terrain/src/terrain/clients/user_prefs.clj index 25164dc36..04f288bd2 100644 --- a/services/terrain/src/terrain/clients/user_prefs.clj +++ b/services/terrain/src/terrain/clients/user_prefs.clj @@ -1,10 +1,11 @@ (ns terrain.clients.user-prefs (:use [terrain.util.config] [clojure-commons.error-codes] - [slingshot.slingshot :only [throw+]]) + [slingshot.slingshot :only [try+ throw+]]) (:require [clj-http.client :as http] [cemerick.url :refer [url]] - [cheshire.core :as json])) + [cheshire.core :as json] + [clojure.tools.logging :as log])) (defn- user-prefs-url [user] @@ -29,6 +30,16 @@ :else (json/parse-string (:body resp) true)))) +(defn get-prefs-safe + "Same as get-prefs, but does not throw exceptions. + Instead, caught exceptions are logged and nil is returned." + [username] + (try+ + (get-prefs username) + (catch Object e + (log/error e) + nil))) + (defn set-prefs [username prefs-map] (let [json-prefs (json/encode prefs-map) diff --git a/services/terrain/src/terrain/services/metadata/internal_jobs.clj b/services/terrain/src/terrain/services/metadata/internal_jobs.clj index 7572b2d54..4ebaf0353 100644 --- a/services/terrain/src/terrain/services/metadata/internal_jobs.clj +++ b/services/terrain/src/terrain/services/metadata/internal_jobs.clj @@ -1,9 +1,17 @@ (ns terrain.services.metadata.internal-jobs - (:use [slingshot.slingshot :only [throw+]]) + (:use [slingshot.slingshot :only [throw+]] + [terrain.auth.user-attributes :only [current-user]]) (:require [clojure-commons.error-codes :as ce] [terrain.clients.apps :as apps] + [terrain.clients.user-prefs :as prefs] [terrain.util.config :as config])) +(defn- get-url-import-notify-pref + "Looks up the current user's enableImportEmailNotification preference. + Returns true by default if the preference is not set or could not be retrieved." + [] + (:enableImportEmailNotification (prefs/get-prefs-safe (:username current-user)) true)) + (defn- load-param-map [app-id] (->> (apps/get-app app-id) @@ -48,7 +56,7 @@ :debug false :create_output_subdir false :output_dir dest-path - :notify true + :notify (get-url-import-notify-pref) :skip-parent-meta true :file-metadata [(avu "ipc-url-import" address "Import URL")] :archive_logs false}) From 4ffcc9f95c3407f3847d81ca6658d3b8f7cb95b3 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Fri, 12 Feb 2016 10:20:15 -0700 Subject: [PATCH 023/183] CORE-6943 add new line at EOF --- .../client/desktop/DesktopContextualHelpMessages.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopContextualHelpMessages.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopContextualHelpMessages.properties index ad851589c..b88987668 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopContextualHelpMessages.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/desktop/DesktopContextualHelpMessages.properties @@ -5,4 +5,4 @@ notifyEmailHelp =

This option will send you an email when your analysis or UR rememberFileSelectorPathHelp =

This option allows the Data Manager to automatically navigate the file tree to the file location that was used the last time you ran an app.

This option is helpful if you have an extensive file tree or often use the same input file location.

saveSessionHelp =

Enabling the Save session option will restore your Discovery Environment desktop to exactly how you left it when you last logged out.

defaultOutputFolderHelp =

This will be the default location where all outputs from your analyses can be found.

You can keep the default path or click Browse to select a new location.

-notifyEmail = Email me when analysis / URL import status changes. \ No newline at end of file +notifyEmail = Email me when analysis / URL import status changes. From d31fa8a9427a6a755b66ac6dee6ed8e768193e46 Mon Sep 17 00:00:00 2001 From: John Wregglesworth Date: Tue, 16 Feb 2016 10:23:24 -0700 Subject: [PATCH 024/183] Added ignore_errors: yes to util-cfg-docker-rm-configs --- ansible/roles/util-cfg-docker-rm-configs/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/util-cfg-docker-rm-configs/tasks/main.yml b/ansible/roles/util-cfg-docker-rm-configs/tasks/main.yml index cbd1d17bd..4c57d7fbb 100644 --- a/ansible/roles/util-cfg-docker-rm-configs/tasks/main.yml +++ b/ansible/roles/util-cfg-docker-rm-configs/tasks/main.yml @@ -13,5 +13,6 @@ shell: "docker-compose -f {{docker.compose_path}} rm -f -v config_{{service_name}}" register: docker_pull_v changed_when: docker_pull_v.stdout.find('Image is up to date') == -1 + ignore_errors: yes tags: - docker_rm From 275d8e6710b524089be758c032d5154ac932ce4c Mon Sep 17 00:00:00 2001 From: John Wregglesworth Date: Tue, 16 Feb 2016 10:31:42 -0700 Subject: [PATCH 025/183] Added 'ignore_errors: yes' to util-cfg-docker-rm --- ansible/roles/util-cfg-docker-rm/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/util-cfg-docker-rm/tasks/main.yml b/ansible/roles/util-cfg-docker-rm/tasks/main.yml index 22c0b91f3..655b743c0 100644 --- a/ansible/roles/util-cfg-docker-rm/tasks/main.yml +++ b/ansible/roles/util-cfg-docker-rm/tasks/main.yml @@ -13,5 +13,6 @@ shell: "docker-compose -f {{docker.compose_path}} rm -f -v {{service_name}}" register: docker_pull_v changed_when: docker_pull_v.stdout.find('Image is up to date') == -1 + ignore_errors: yes tags: - docker_rm From 21db501efdcb91030903284193dbcc61ed263c20 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 17 Feb 2016 15:03:56 -0700 Subject: [PATCH 026/183] Reinstate infosquito batch size of 1000 on @tedgin's recommendation that this was the original intention. 
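The batch size appears to control how many entries Infosquito folds into
each bulk indexing request, so raising it from 100 to 1000 cuts the number
of requests per indexing pass by a factor of ten. Purely as a sketch of the
effect (this is not Infosquito's actual code; `entries` and `bulk-index`
are hypothetical):

    ;; with a batch size of 1000, a million entries become 1000 bulk
    ;; requests instead of 10000
    (doseq [batch (partition-all 1000 entries)]
      (bulk-index batch))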
--- .../roles/util-cfg-service/templates/infosquito.properties.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/util-cfg-service/templates/infosquito.properties.j2 b/ansible/roles/util-cfg-service/templates/infosquito.properties.j2 index c52dea9fe..a0eb119fc 100644 --- a/ansible/roles/util-cfg-service/templates/infosquito.properties.j2 +++ b/ansible/roles/util-cfg-service/templates/infosquito.properties.j2 @@ -12,7 +12,7 @@ infosquito.icat.db = {{ icat.db }} # Indexing Options infosquito.base-collection = /{{ irods.zone }} -infosquito.index-batch-size = 100 +infosquito.index-batch-size = 1000 # AMQP Settings infosquito.amqp.host = {{ amqp_broker.host }} From 4dd815b47d088597992f37623aa404d15efeed97 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Thu, 11 Feb 2016 12:46:54 -0700 Subject: [PATCH 027/183] CORE-4345 Notification DeleteAll btn deletes all only in current filter --- .../iplantc/de/client/services/MessageServiceFacade.java | 4 ++-- .../de/client/services/impl/MessageServiceFacadeImpl.java | 7 ++++++- .../client/presenter/NotificationPresenterImpl.java | 2 +- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/MessageServiceFacade.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/MessageServiceFacade.java index 97c73a543..29f3661bb 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/MessageServiceFacade.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/MessageServiceFacade.java @@ -53,10 +53,10 @@ public interface MessageServiceFacade { */ void getMessageCounts(AsyncCallback callback); - void deleteAll(AsyncCallback callback); + void deleteAll(String filter, AsyncCallback callback); void markAllNotificationsSeen(AsyncCallback callback); void getPermanentIdRequestStatusHistory(String id, AsyncCallback callback); -} \ No newline at end of file +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java index 2623080e0..3ec1fb6d8 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java @@ -8,6 +8,7 @@ import org.iplantc.de.client.models.notifications.Counts; import org.iplantc.de.client.models.notifications.Notification; import org.iplantc.de.client.models.notifications.NotificationAutoBeanFactory; +import org.iplantc.de.client.models.notifications.NotificationCategory; import org.iplantc.de.client.services.MessageServiceFacade; import org.iplantc.de.client.services.PermIdRequestUserServiceFacade; import org.iplantc.de.client.services.callbacks.NotificationCallback; @@ -141,9 +142,13 @@ public void getMessageCounts(final AsyncCallback callback) { } @Override - public void deleteAll(AsyncCallback callback) { + public void deleteAll(String filter, AsyncCallback callback) { String address = deProperties.getMuleServiceBaseUrl() + "notifications/delete-all"; //$NON-NLS-1$ + if (!filter.toLowerCase().equals(NotificationCategory.ALL.toString().toLowerCase())){ + address += "?filter=" + URL.encodeQueryString(filter.toLowerCase()); + } + ServiceCallWrapper wrapper = new ServiceCallWrapper(DELETE, address); deServiceFacade.getServiceData(wrapper, callback); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java 
b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java index 47a10cd17..c96fa4fb3 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java @@ -202,7 +202,7 @@ public void go(HasOneWidget container) { @Override public void onDeleteAllClicked() { view.mask(); - messageServiceFacade.deleteAll(new AsyncCallback() { + messageServiceFacade.deleteAll(currentCategory.toString(), new AsyncCallback() { @Override public void onFailure(Throwable caught) { From a0766c0ec40b47415670dbc4e6f07c15f056d55e Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Thu, 4 Feb 2016 16:56:59 -0700 Subject: [PATCH 028/183] CORE-4832 Hide Notification menu when opening Notification window --- .../main/java/org/iplantc/de/desktop/client/DesktopView.java | 2 ++ .../de/desktop/client/presenter/DesktopPresenterImpl.java | 4 +++- .../org/iplantc/de/desktop/client/views/DesktopViewImpl.java | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/DesktopView.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/DesktopView.java index 52b901ee5..4f4037255 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/DesktopView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/DesktopView.java @@ -248,4 +248,6 @@ interface UnseenNotificationsPresenter { void setUnseenNotificationCount(int count); void setUnseenSystemMessageCount(int count); + + void hideNotificationMenu(); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/presenter/DesktopPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/presenter/DesktopPresenterImpl.java index cf6551c5a..fd69690c7 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/presenter/DesktopPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/presenter/DesktopPresenterImpl.java @@ -214,12 +214,14 @@ public void onSuccess(Void result) { @Override public void doSeeAllNotifications() { - show(ConfigFactory.notifyWindowConfig(NotificationCategory.ALL)); + show(ConfigFactory.notifyWindowConfig(NotificationCategory.ALL)); + view.hideNotificationMenu(); } @Override public void doSeeNewNotifications() { show(ConfigFactory.notifyWindowConfig(NotificationCategory.NEW)); + view.hideNotificationMenu(); } public void doViewGenomes(final File file) { diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/DesktopViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/DesktopViewImpl.java index 0322f3fd8..1472313c3 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/DesktopViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/DesktopViewImpl.java @@ -105,6 +105,10 @@ void onNotificationMenuClicked(ShowContextMenuEvent event){ } } + public void hideNotificationMenu() { + ((DesktopIconButton)notificationsBtn).hideMenu(); + } + @Override public void onRegister(RegisterEvent event) { final Widget eventItem = event.getItem(); From e85db385c37990aec006cec78ab167d2565ca03a Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Thu, 11 Feb 2016 13:20:54 -0700 Subject: [PATCH 029/183] CORE-4345 Add tests for Notifications --- .../presenter/NotificationPresenterImpl.java | 8 +- .../presenter/DesktopPresenterImplTest.java | 15 +- 
.../DesktopNotifications_DesktopViewTest.java | 38 +++-- .../NotificationPresenterImplTest.java | 135 ++++++++++++++++++ 4 files changed, 183 insertions(+), 13 deletions(-) create mode 100644 ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java index c96fa4fb3..2e7620d58 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java @@ -115,12 +115,12 @@ public void onSuccess(String result1) { private final IplantErrorStrings errorStrings; - private final EventBus eventBus; - private final MessageServiceFacade messageServiceFacade; - private final NotificationToolbarView toolbar; + EventBus eventBus; + MessageServiceFacade messageServiceFacade; + NotificationToolbarView toolbar; private final NotificationView view; private PagingLoadResult callbackResult; - private NotificationCategory currentCategory; + NotificationCategory currentCategory; private final JsonUtil jsonUtil; public NotificationPresenterImpl(final NotificationView view) { diff --git a/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/presenter/DesktopPresenterImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/presenter/DesktopPresenterImplTest.java index b712ec1e6..a2a4e8b91 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/presenter/DesktopPresenterImplTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/presenter/DesktopPresenterImplTest.java @@ -5,10 +5,12 @@ import org.iplantc.de.client.events.EventBus; import org.iplantc.de.client.models.WindowType; import org.iplantc.de.commons.client.requests.KeepaliveTimer; +import org.iplantc.de.commons.client.views.window.configs.ConfigFactory; import org.iplantc.de.systemMessages.client.view.NewMessageView; import com.google.gwt.dom.client.Element; import com.google.gwtmockito.GxtMockitoTestRunner; +import com.google.gwtmockito.WithClassesToStub; import com.sencha.gxt.widget.core.client.WindowManager; @@ -22,6 +24,7 @@ import org.mockito.Mock; @RunWith(GxtMockitoTestRunner.class) +@WithClassesToStub(ConfigFactory.class) public class DesktopPresenterImplTest { @Mock DesktopWindowManager desktopWindowManagerMock; @@ -84,4 +87,14 @@ public class DesktopPresenterImplTest { verifyNoMoreInteractions(desktopWindowManagerMock); } -} \ No newline at end of file + @Test public void testDoSeeAllNotifications() { + uut.doSeeAllNotifications(); + verify(viewMock).hideNotificationMenu(); + } + + @Test public void testDoSeeNewNotifications() { + uut.doSeeNewNotifications(); + verify(viewMock).hideNotificationMenu(); + } + +} diff --git a/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/views/DesktopNotifications_DesktopViewTest.java b/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/views/DesktopNotifications_DesktopViewTest.java index 098c4d889..30a5c871d 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/views/DesktopNotifications_DesktopViewTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/views/DesktopNotifications_DesktopViewTest.java @@ -1,35 +1,53 @@ package org.iplantc.de.desktop.client.views; +import static org.mockito.Matchers.eq; +import static 
org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + import org.iplantc.de.desktop.client.DesktopView; +import org.iplantc.de.desktop.client.views.widgets.DesktopIconButton; import org.iplantc.de.resources.client.messages.IplantNewUserTourStrings; import com.google.gwt.user.client.ui.Widget; import com.google.gwtmockito.GxtMockitoTestRunner; +import com.google.gwtmockito.WithClassesToStub; import com.sencha.gxt.widget.core.client.WindowManager; import com.sencha.gxt.widget.core.client.event.RegisterEvent; import com.sencha.gxt.widget.core.client.event.ShowContextMenuEvent; import com.sencha.gxt.widget.core.client.event.UnregisterEvent; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.*; +import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @RunWith(GxtMockitoTestRunner.class) +@WithClassesToStub(DesktopIconButton.class) public class DesktopNotifications_DesktopViewTest { @Mock RegisterEvent registerEventMock; @Mock IplantNewUserTourStrings tourStringsMock; @Mock UnregisterEvent unregisterEventMock; @Mock WindowManager windowManagerMock; + @Mock DesktopView.Presenter mockPresenter; + @Mock DesktopIconButton notificationsBtnMock; - @Test public void notificationsMarkedSeenWhenNotificationBtnSelectedWithLessThan10Unseen() { - DesktopViewImpl uut = new DesktopViewImpl(tourStringsMock, windowManagerMock); + private DesktopViewImpl uut; + + @Before + public void setUp() { + uut = new DesktopViewImpl(tourStringsMock, windowManagerMock); + uut.notificationsBtn = notificationsBtnMock; verifyViewInit(uut); - final DesktopView.Presenter mockPresenter = mock(DesktopView.Presenter.class); uut.setPresenter(mockPresenter); + } + + @Test public void notificationsMarkedSeenWhenNotificationBtnSelectedWithLessThan10Unseen() { + uut.unseenNotificationCount = 9; uut.onNotificationMenuClicked(mock(ShowContextMenuEvent.class)); @@ -38,9 +56,7 @@ public class DesktopNotifications_DesktopViewTest { } @Test public void notificationsNotMarkedSeenWhenNotificationBtnSelectedWithGreaterThan10Unseen() { - DesktopViewImpl uut = new DesktopViewImpl(tourStringsMock, windowManagerMock); - verifyViewInit(uut); - final DesktopView.Presenter mockPresenter = mock(DesktopView.Presenter.class); + uut.setPresenter(mockPresenter); uut.unseenNotificationCount = 11; @@ -49,6 +65,12 @@ public class DesktopNotifications_DesktopViewTest { verifyNoMoreInteractions(mockPresenter); } + @Test public void notificationMenuHidesOnSeeAllNotifications() { + uut.hideNotificationMenu(); + verify(notificationsBtnMock).hideMenu(); + verifyNoMoreInteractions(notificationsBtnMock, mockPresenter); + } + private void verifyViewInit(DesktopViewImpl uut) { verify(windowManagerMock).addRegisterHandler(eq(uut)); verify(windowManagerMock).addUnregisterHandler(eq(uut)); diff --git a/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java new file mode 100644 index 000000000..8c94e2382 --- /dev/null +++ b/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java @@ -0,0 +1,135 @@ +package org.iplantc.de.notifications.client.presenter; + +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.isA; 
+import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import org.iplantc.de.client.events.EventBus; +import org.iplantc.de.client.models.notifications.NotificationCategory; +import org.iplantc.de.client.models.notifications.NotificationMessage; +import org.iplantc.de.client.services.MessageServiceFacade; +import org.iplantc.de.notifications.client.events.DeleteNotificationsUpdateEvent; +import org.iplantc.de.notifications.client.views.NotificationToolbarView; +import org.iplantc.de.notifications.client.views.NotificationView; + +import com.google.gwt.json.client.JSONObject; +import com.google.gwt.user.client.rpc.AsyncCallback; +import com.google.gwtmockito.GxtMockitoTestRunner; + +import com.sencha.gxt.data.shared.ListStore; +import com.sencha.gxt.data.shared.loader.FilterPagingLoadConfig; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; + +import java.util.Iterator; +import java.util.List; + +/** + * @author aramsey + */ +@RunWith(GxtMockitoTestRunner.class) +public class NotificationPresenterImplTest { + + @Mock NotificationView viewMock; + @Mock MessageServiceFacade messageServiceFacadeMock; + @Mock NotificationToolbarView toolbarViewMock; + @Mock EventBus eventBusMock; + @Mock NotificationCategory currentCategoryMock; + @Mock ListStore listStoreMock; + @Mock List listMock; + @Mock NotificationMessage notificationMessageMock; + @Mock Iterator iteratorMock; + + @Captor ArgumentCaptor> asyncCallbackStringCaptor; + + private NotificationPresenterImpl uut; + + @Before + public void setUp() { + when(currentCategoryMock.toString()).thenReturn("sample"); + when(viewMock.getCurrentLoadConfig()).thenReturn(mock(FilterPagingLoadConfig.class)); + when(notificationMessageMock.getId()).thenReturn("id"); + + uut = new NotificationPresenterImpl(viewMock); + + uut.currentCategory = currentCategoryMock; + uut.messageServiceFacade = messageServiceFacadeMock; + uut.toolbar = toolbarViewMock; + uut.eventBus = eventBusMock; + } + + @Test + public void testOnNotificationGridRefresh_emptyListStore() { + when(listStoreMock.size()).thenReturn(0); + when(viewMock.getListStore()).thenReturn(listStoreMock); + + uut.onGridRefresh(); + verify(toolbarViewMock).setDeleteAllButtonEnabled(eq(false)); + } + + @Test + public void testOnNotificationGridRefresh_nonEmptyListStore() { + when(listStoreMock.size()).thenReturn(5); + when(viewMock.getListStore()).thenReturn(listStoreMock); + + uut.onGridRefresh(); + verify(toolbarViewMock).setDeleteAllButtonEnabled(eq(true)); + } + + @Test + public void testOnNotificationSelection_emptyListStore() { + when(listMock.size()).thenReturn(0); + + uut.onNotificationSelection(listMock); + verify(toolbarViewMock).setDeleteButtonEnabled(eq(false)); + } + + @Test + public void testOnNotificationSelection_nonEmptyListStore() { + when(listMock.size()).thenReturn(5); + + uut.onNotificationSelection(listMock); + verify(toolbarViewMock).setDeleteButtonEnabled(eq(true)); + } + + + @Test + public void testOnNotificationToolbarDeleteAllClicked() { + uut.onDeleteAllClicked(); + + verify(viewMock).mask(); + verify(messageServiceFacadeMock).deleteAll(eq(currentCategoryMock.toString()), asyncCallbackStringCaptor.capture()); + AsyncCallback asyncCallback = asyncCallbackStringCaptor.getValue(); + + asyncCallback.onSuccess("result"); + verify(viewMock).unmask(); + 
verify(viewMock).loadNotifications(eq(viewMock.getCurrentLoadConfig())); + verify(eventBusMock).fireEvent(isA(DeleteNotificationsUpdateEvent.class)); + + } + + @Test + public void testOnNotificationToolbarDeleteClicked() { + when(listMock.isEmpty()).thenReturn(false); + when(listMock.size()).thenReturn(1); + when(iteratorMock.hasNext()).thenReturn(true, false); + when(iteratorMock.next()).thenReturn(notificationMessageMock); + when(listMock.iterator()).thenReturn(iteratorMock); + when(viewMock.getSelectedItems()).thenReturn(listMock); + + uut.onDeleteClicked(); + + verify(messageServiceFacadeMock).deleteMessages(isA(JSONObject.class), asyncCallbackStringCaptor.capture()); + + asyncCallbackStringCaptor.getValue().onSuccess("result"); + verify(eventBusMock).fireEvent(isA(DeleteNotificationsUpdateEvent.class)); + + } +} From 60c54c846f061518e5fe8d30c5e2380d5712744e Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Thu, 11 Feb 2016 14:34:00 -0700 Subject: [PATCH 030/183] CORE-4345 Remove duplicated line in DesktopViewTest --- .../client/views/DesktopNotifications_DesktopViewTest.java | 1 - 1 file changed, 1 deletion(-) diff --git a/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/views/DesktopNotifications_DesktopViewTest.java b/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/views/DesktopNotifications_DesktopViewTest.java index 30a5c871d..329ed71aa 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/views/DesktopNotifications_DesktopViewTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/desktop/client/views/DesktopNotifications_DesktopViewTest.java @@ -57,7 +57,6 @@ public void setUp() { @Test public void notificationsNotMarkedSeenWhenNotificationBtnSelectedWithGreaterThan10Unseen() { - uut.setPresenter(mockPresenter); uut.unseenNotificationCount = 11; uut.onNotificationMenuClicked(mock(ShowContextMenuEvent.class)); From 1bbddb54a8d4a95fec4f2f8b6ae6377aeb2b0a90 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 15 Feb 2016 17:11:50 -0700 Subject: [PATCH 031/183] CORE-4345 Update notification/delete-all endpoint to accept filter --- .../NotificationAgent/src/notification_agent/delete.clj | 8 +++++--- .../NotificationAgent/src/notification_agent/query.clj | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/services/NotificationAgent/src/notification_agent/delete.clj b/services/NotificationAgent/src/notification_agent/delete.clj index 0fad25b13..f4f7a11c8 100644 --- a/services/NotificationAgent/src/notification_agent/delete.clj +++ b/services/NotificationAgent/src/notification_agent/delete.clj @@ -2,7 +2,8 @@ (:use [notification-agent.common] [slingshot.slingshot :only [throw+]]) (:require [clojure.tools.logging :as log] - [notification-agent.db :as db])) + [notification-agent.db :as db] + [notification-agent.query :as query])) (defn delete-messages "Handles a message deletion request. 
The request body should consist of @@ -23,9 +24,10 @@ "Handles a request to delete all messages for a specific user that match" [params] (log/debug "handling a notification delete-all request") - (let [user (validate-user (:user params))] + (let [user (validate-user (:user params)) + query {:filter (query/get-filter params)}] (log/debug "deleting notifications for" user) - (db/delete-matching-notifications user params) + (db/delete-matching-notifications user query) {:count (str (db/count-matching-messages user {:seen false}))})) (defn delete-system-messages diff --git a/services/NotificationAgent/src/notification_agent/query.clj b/services/NotificationAgent/src/notification_agent/query.clj index c0ed2ed18..073e9212c 100644 --- a/services/NotificationAgent/src/notification_agent/query.clj +++ b/services/NotificationAgent/src/notification_agent/query.clj @@ -83,7 +83,7 @@ filt (mangle-filter (:filter query-params))] (if (= filt "new") false seen))) -(defn- get-filter +(defn get-filter "Gets the filter from the query parameters." [query-params] (let [filt (mangle-filter (:filter query-params))] From 50395f0f11f1c815d3cd8dd1c641b849a24e3228 Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Thu, 18 Feb 2016 13:40:57 -0700 Subject: [PATCH 032/183] Replace `app_version` with `de_version`. --- ansible/inventories/group_vars/all | 4 ++-- ansible/roles/util-cfg-service/templates/ui/de.properties.j2 | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/inventories/group_vars/all b/ansible/inventories/group_vars/all index 27bb91baa..b215fcea2 100644 --- a/ansible/inventories/group_vars/all +++ b/ansible/inventories/group_vars/all @@ -127,8 +127,8 @@ de: # ssl_certificate: "/etc/ssl/example.com.crt" # ssl_certificate_key: "/etc/ssl/example.com.key" -app_version_name: Phthalo -app_version: 2.5.0 +de_version_name: Phthalo +de_version: 2.5.0 de_feedback_to_addr: "" de_mail_from_addr: "{{ de_feedback_to_addr }}" diff --git a/ansible/roles/util-cfg-service/templates/ui/de.properties.j2 b/ansible/roles/util-cfg-service/templates/ui/de.properties.j2 index b926ad98a..1580d5835 100644 --- a/ansible/roles/util-cfg-service/templates/ui/de.properties.j2 +++ b/ansible/roles/util-cfg-service/templates/ui/de.properties.j2 @@ -8,8 +8,8 @@ # This values are used since reading the MANIFEST.MF file from a deployed WAR # is a tricky environment. 
############################################################################### -org.iplantc.discoveryenvironment.about.defaultBuildNumber = {{ app_version_name }} -org.iplantc.discoveryenvironment.about.releaseVersion = {{ app_version }} +org.iplantc.discoveryenvironment.about.defaultBuildNumber = {{ de_version_name }} +org.iplantc.discoveryenvironment.about.releaseVersion = {{ de_version }} ############################################################################### # CAS Authentication Settings From da0dae593fff6279912a4c0fa4b210d1c2114ada Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Thu, 18 Feb 2016 14:31:42 -0700 Subject: [PATCH 033/183] CORE-4345 Pass NotificationCategory instead of string to Msg service --- .../iplantc/de/client/services/MessageServiceFacade.java | 3 ++- .../de/client/services/impl/MessageServiceFacadeImpl.java | 6 +++--- .../client/presenter/NotificationPresenterImpl.java | 2 +- .../client/presenter/NotificationPresenterImplTest.java | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/MessageServiceFacade.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/MessageServiceFacade.java index 29f3661bb..3ed978249 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/MessageServiceFacade.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/MessageServiceFacade.java @@ -3,6 +3,7 @@ import org.iplantc.de.client.models.HasId; import org.iplantc.de.client.models.notifications.Counts; import org.iplantc.de.client.models.notifications.Notification; +import org.iplantc.de.client.models.notifications.NotificationCategory; import org.iplantc.de.client.services.callbacks.NotificationCallback; import com.google.gwt.json.client.JSONObject; @@ -53,7 +54,7 @@ public interface MessageServiceFacade { */ void getMessageCounts(AsyncCallback callback); - void deleteAll(String filter, AsyncCallback callback); + void deleteAll(NotificationCategory category, AsyncCallback callback); void markAllNotificationsSeen(AsyncCallback callback); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java index 3ec1fb6d8..b0eece166 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java @@ -142,11 +142,11 @@ public void getMessageCounts(final AsyncCallback callback) { } @Override - public void deleteAll(String filter, AsyncCallback callback) { + public void deleteAll(NotificationCategory category, AsyncCallback callback) { String address = deProperties.getMuleServiceBaseUrl() + "notifications/delete-all"; //$NON-NLS-1$ - if (!filter.toLowerCase().equals(NotificationCategory.ALL.toString().toLowerCase())){ - address += "?filter=" + URL.encodeQueryString(filter.toLowerCase()); + if (!category.equals(NotificationCategory.ALL)) { + address += "?filter=" + URL.encodeQueryString(category.name().toLowerCase()); } ServiceCallWrapper wrapper = new ServiceCallWrapper(DELETE, address); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java index 2e7620d58..d6ca2b8de 100644 --- 
a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java @@ -202,7 +202,7 @@ public void go(HasOneWidget container) { @Override public void onDeleteAllClicked() { view.mask(); - messageServiceFacade.deleteAll(currentCategory.toString(), new AsyncCallback() { + messageServiceFacade.deleteAll(currentCategory, new AsyncCallback() { @Override public void onFailure(Throwable caught) { diff --git a/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java index 8c94e2382..9afc5ee34 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java @@ -105,7 +105,7 @@ public void testOnNotificationToolbarDeleteAllClicked() { uut.onDeleteAllClicked(); verify(viewMock).mask(); - verify(messageServiceFacadeMock).deleteAll(eq(currentCategoryMock.toString()), asyncCallbackStringCaptor.capture()); + verify(messageServiceFacadeMock).deleteAll(eq(currentCategoryMock), asyncCallbackStringCaptor.capture()); AsyncCallback asyncCallback = asyncCallbackStringCaptor.getValue(); asyncCallback.onSuccess("result"); From 6b5502f29b0db534e49123d2a43ad6a379b0208f Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Thu, 18 Feb 2016 16:17:04 -0700 Subject: [PATCH 034/183] CORE-4345 Protect against NPE in Msg service --- .../de/client/services/impl/MessageServiceFacadeImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java index b0eece166..5c08ca990 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java @@ -145,7 +145,7 @@ public void getMessageCounts(final AsyncCallback callback) { public void deleteAll(NotificationCategory category, AsyncCallback callback) { String address = deProperties.getMuleServiceBaseUrl() + "notifications/delete-all"; //$NON-NLS-1$ - if (!category.equals(NotificationCategory.ALL)) { + if (NotificationCategory.ALL != category) { address += "?filter=" + URL.encodeQueryString(category.name().toLowerCase()); } From 93eaedbad9e79b317cad9caadf8fdbf985f841fa Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Fri, 19 Feb 2016 13:22:38 -0700 Subject: [PATCH 035/183] CORE-7518 Optimize count-all-items-under-folder ICAT query. The JOIN on the "parent" and "r_coll_main" tables would cause this query to run for over 2 hours for our iRODS admin proxy user, which has permissions for all folders in the data store. 
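The rewrite below drops the counted UNION in favor of adding two scalar subquery counts. As a rough sketch of the shape of that change (illustrative table and column names, not the real ICAT schema; the rewrite also leans on the fact that data-object ids and collection ids never overlap, so no cross-set deduplication is lost):

-- Before: count(*) over a UNION of two DISTINCT id sets; both sets must be
-- materialized and merged before anything can be counted.
SELECT count(*) AS total
  FROM ( SELECT DISTINCT f.file_id FROM files f JOIN access a ON a.object_id = f.file_id
         UNION
         SELECT DISTINCT c.coll_id FROM colls c JOIN access a ON a.object_id = c.coll_id ) AS contents;

-- After: two independent scalar counts added together; each half is planned
-- on its own, and no combined result set is ever built. Correct only because
-- the two id sets are disjoint, as they are in the ICAT.
SELECT (SELECT count(DISTINCT f.file_id) FROM files f JOIN access a ON a.object_id = f.file_id)
     + (SELECT count(DISTINCT c.coll_id) FROM colls c JOIN access a ON a.object_id = c.coll_id) AS total;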
--- .../clj-icat-direct/src/clj_icat_direct/queries.clj | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj index ea2a5ba40..48fb97643 100644 --- a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj +++ b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj @@ -533,17 +533,16 @@ FROM r_data_main WHERE coll_id = ANY(ARRAY( SELECT coll_id FROM parent ))) - SELECT count(*) AS total - FROM ( SELECT DISTINCT d.data_id FROM r_objt_access a + SELECT ((SELECT count(DISTINCT d.data_id) FROM r_objt_access a JOIN data_objs d ON a.object_id = d.data_id WHERE a.user_id IN ( SELECT group_user_id FROM user_groups ) - AND a.object_id IN ( SELECT data_id from data_objs ) - UNION - SELECT DISTINCT c.coll_id FROM r_coll_main c + AND a.object_id IN ( SELECT data_id from data_objs )) + + + (SELECT count(DISTINCT c.coll_id) FROM r_coll_main c JOIN r_objt_access a ON c.coll_id = a.object_id - JOIN parent p ON c.parent_coll_name = p.coll_name WHERE a.user_id IN ( SELECT group_user_id FROM user_groups ) - AND c.coll_type != 'linkPoint' ) AS contents" + AND c.parent_coll_name = ANY(ARRAY( SELECT coll_name FROM parent )) + AND c.coll_type != 'linkPoint')) AS total" :list-folders-in-folder "WITH user_groups AS ( SELECT g.* FROM r_user_main u From a53169ab99eaeb56ab801abab673a2bd99b431c8 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 17 Feb 2016 15:55:48 -0700 Subject: [PATCH 036/183] Improve count- and list- in folder queries * In CTEs, select only columns which are used in later requests * Remove unnecessary UNION, prefer simple SELECT DISTINCT with an OR condition * Remove unnecessary 'parent' CTEs --- .../src/clj_icat_direct/queries.clj | 36 ++++++++----------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj index 48fb97643..0d42b1f59 100644 --- a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj +++ b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj @@ -517,21 +517,19 @@ (def queries {:count-all-items-under-folder - "WITH user_groups AS ( SELECT g.* + "WITH user_groups AS ( SELECT g.group_user_id FROM r_user_main u JOIN r_user_group g ON g.user_id = u.user_id WHERE u.user_name = ? AND u.zone_name = ? ), - parent AS ( SELECT * from r_coll_main + parent AS ( SELECT DISTINCT coll_id, coll_name from r_coll_main WHERE coll_name = ? - UNION - SELECT * from r_coll_main - WHERE coll_name LIKE ? || '/%' ), + OR coll_name LIKE ? || '/%' ), - data_objs AS ( SELECT * + data_objs AS ( SELECT data_id FROM r_data_main - WHERE coll_id = ANY(ARRAY( SELECT coll_id FROM parent ))) + WHERE coll_id = ANY(ARRAY( SELECT coll_id FROM parent )) ) SELECT ((SELECT count(DISTINCT d.data_id) FROM r_objt_access a JOIN data_objs d ON a.object_id = d.data_id @@ -545,14 +543,11 @@ AND c.coll_type != 'linkPoint')) AS total" :list-folders-in-folder - "WITH user_groups AS ( SELECT g.* FROM r_user_main u + "WITH user_groups AS ( SELECT g.group_user_id FROM r_user_main u JOIN r_user_group g ON g.user_id = u.user_id WHERE u.user_name = ? AND u.zone_name = ? ), - parent AS ( SELECT * from r_coll_main - WHERE coll_name = ? 
) - SELECT DISTINCT c.parent_coll_name as dir_name, c.coll_name as full_path, @@ -565,28 +560,28 @@ MAX(a.access_type_id) as access_type_id FROM r_coll_main c JOIN r_objt_access a ON c.coll_id = a.object_id - JOIN parent p ON c.parent_coll_name = p.coll_name JOIN r_objt_metamap mm ON mm.object_id = c.coll_id JOIN r_meta_main m ON m.meta_id = mm.meta_id WHERE a.user_id IN ( SELECT group_user_id FROM user_groups ) AND c.coll_type != 'linkPoint' + AND c.parent_coll_name = ? AND m.meta_attr_name = 'ipc_UUID' GROUP BY dir_name, full_path, base_name, c.create_ts, c.modify_ts, type, data_size, uuid ORDER BY base_name ASC" :count-files-in-folder - "WITH user_groups AS ( SELECT g.* + "WITH user_groups AS ( SELECT g.group_user_id FROM r_user_main u JOIN r_user_group g ON g.user_id = u.user_id WHERE u.user_name = ? AND u.zone_name = ? ), - parent AS ( SELECT * from r_coll_main + parent AS ( SELECT coll_id from r_coll_main WHERE coll_name = ? ), - data_objs AS ( SELECT * + data_objs AS ( SELECT data_id FROM r_data_main - WHERE coll_id = ANY(ARRAY( SELECT coll_id FROM parent ))) + WHERE coll_id IN ( SELECT coll_id FROM parent )) SELECT count(DISTINCT d.data_id) FROM r_objt_access a JOIN data_objs d ON a.object_id = d.data_id @@ -594,20 +589,17 @@ AND a.object_id IN ( SELECT data_id from data_objs )" :count-folders-in-folder - "WITH user_groups AS ( SELECT g.* + "WITH user_groups AS ( SELECT g.group_user_id FROM r_user_main u JOIN r_user_group g ON g.user_id = u.user_id WHERE u.user_name = ? AND u.zone_name = ? ), - parent AS ( SELECT * from r_coll_main - WHERE coll_name = ? ) - SELECT count(DISTINCT c.coll_id) FROM r_coll_main c JOIN r_objt_access a ON c.coll_id = a.object_id - JOIN parent p ON c.parent_coll_name = p.coll_name WHERE a.user_id IN ( SELECT group_user_id FROM user_groups ) - AND c.coll_type != 'linkPoint'" + AND c.coll_type != 'linkPoint' + AND c.parent_coll_name = ?" :file-permissions "SELECT DISTINCT o.access_type_id, u.user_name From 625102cdb6fd543dd976585d1b6cfd6dce79b7ad Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 17 Feb 2016 17:12:40 -0700 Subject: [PATCH 037/183] Select only needed columns in file_types CTE. --- libs/clj-icat-direct/src/clj_icat_direct/queries.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj index 0d42b1f59..a6bf1ee88 100644 --- a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj +++ b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj @@ -712,7 +712,7 @@ AND o.object_id IN (SELECT object_id FROM r_objt_access WHERE user_id in (SELECT group_user_id FROM groups))), - file_types AS (SELECT * + file_types AS (SELECT om.object_id, mm.meta_attr_value FROM r_objt_metamap AS om JOIN r_meta_main AS mm ON mm.meta_id = om.meta_id WHERE om.object_id = ANY(ARRAY(SELECT object_id FROM uuids)) From 32a3551ae175ac5d171e09c5fdb287b5741370e8 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Mon, 22 Feb 2016 12:21:24 -0700 Subject: [PATCH 038/183] CORE-7512 display requester email and request status history under Update request Dialog in Belphegor. 
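The new details call, like the existing request-list call, funnels its JSON response through GWT's AutoBean mechanism via AutoBeanCodex.decode. A minimal, self-contained sketch of that decode pattern, using hypothetical Person/Beans names rather than the interfaces added by this patch:

import com.google.gwt.core.client.GWT;
import com.google.web.bindery.autobean.shared.AutoBean;
import com.google.web.bindery.autobean.shared.AutoBeanCodex;
import com.google.web.bindery.autobean.shared.AutoBeanFactory;

public class AutoBeanDecodeSketch {

    // Hypothetical model: a plain interface of getters is all an AutoBean needs.
    interface Person {
        String getName();
    }

    // Hypothetical factory: one accessor per wrapped type; GWT generates the impl.
    interface Beans extends AutoBeanFactory {
        AutoBean<Person> person();
    }

    static Person decode(String json) {
        Beans factory = GWT.create(Beans.class);
        // decode(...) wraps the raw JSON payload in an AutoBean; as() then
        // exposes it through the typed interface, so no hand-written DTO or
        // manual JSON parsing is needed.
        AutoBean<Person> bean = AutoBeanCodex.decode(factory, Person.class, json);
        return bean.as();
    }
}

// Usage: decode("{\"name\":\"ada\"}").getName() returns "ada".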
--- .../PermanentIdRequestPresenter.java | 62 ++++++++++++++++--- .../PermanentIdRequestAdminServiceFacade.java | 2 + ...manentIdRequestAdminServiceFacadeImpl.java | 7 +++ .../views/PermanentIdRequestView.java | 15 +++-- .../views/PermanentIdRequestViewImpl.java | 18 +----- .../views/PermanentIdRequestViewImpl.ui.xml | 1 + .../views/UpdatePermanentIdRequest.ui.xml | 16 +++-- .../views/UpdatePermanentIdRequestDialog.java | 42 +++++++------ .../identifiers/PermanentIdRequesDetails.java | 30 +++++++++ .../PermanentIdRequestAutoBeanFactory.java | 2 + .../PermanentIdRequestStatusHistory.java | 25 -------- ...ntIdRequestPresenterDefaultAppearance.java | 11 ++++ ...rmanentIdRequestViewDefaultAppearance.java | 10 --- .../toolbar/ToolbarDisplayMessages.properties | 2 +- .../PermanentIdRequestPresenterTest.java | 4 +- 15 files changed, 157 insertions(+), 90 deletions(-) create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequesDetails.java delete mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestStatusHistory.java diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java index 4503e6f95..96c0392e9 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java @@ -4,6 +4,8 @@ import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView; import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.PermanentIdRequestPresenterAppearance; import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.Presenter; +import org.iplantc.de.admin.desktop.client.permIdRequest.views.UpdatePermanentIdRequestDialog; +import org.iplantc.de.client.models.identifiers.PermanentIdRequesDetails; import org.iplantc.de.client.models.identifiers.PermanentIdRequest; import org.iplantc.de.client.models.identifiers.PermanentIdRequestAutoBeanFactory; import org.iplantc.de.client.models.identifiers.PermanentIdRequestList; @@ -21,6 +23,8 @@ import com.google.web.bindery.autobean.shared.AutoBean; import com.google.web.bindery.autobean.shared.AutoBeanCodex; +import com.sencha.gxt.widget.core.client.event.SelectEvent; + /** * * @@ -87,9 +91,8 @@ public void onFailure(Throwable caught) { @Override public void onSuccess(String result) { view.unmask(); - final AutoBean decode = AutoBeanCodex.decode(factory, - PermanentIdRequestList.class, - result); + final AutoBean decode = + AutoBeanCodex.decode(factory, PermanentIdRequestList.class, result); view.loadRequests(decode.as().getRequests()); } @@ -103,7 +106,7 @@ public void loadPermIdRequests() { } @Override - public void updateRequest(final PermanentIdRequestUpdate update) { + public void doUpdateRequest(final PermanentIdRequestUpdate update) { if (selectedRequest != null && update != null) { view.mask(I18N.DISPLAY.loadingMask()); prsvc.updatePermanentIdRequestStatus(selectedRequest.getId(), @@ -114,7 +117,8 @@ public void updateRequest(final PermanentIdRequestUpdate update) { public void onFailure(Throwable caught) { view.unmask(); IplantAnnouncer.getInstance() - .schedule(new ErrorAnnouncementConfig(appearance.statusUpdateFailure())); + .schedule(new 
ErrorAnnouncementConfig( + appearance.statusUpdateFailure())); } @@ -122,7 +126,8 @@ public void onFailure(Throwable caught) { public void onSuccess(String result) { view.unmask(); IplantAnnouncer.getInstance() - .schedule(new SuccessAnnouncementConfig(appearance.statusUpdateSuccess())); + .schedule(new SuccessAnnouncementConfig( + appearance.statusUpdateSuccess())); selectedRequest.setStatus(update.getStatus()); view.update(selectedRequest); } @@ -130,6 +135,41 @@ public void onSuccess(String result) { } } + @Override + public void onUpdateRequest() { + getRequestDetails(new AsyncCallback() { + @Override + public void onFailure(Throwable caught) { + view.unmask(); + IplantErrorDialog ied = new IplantErrorDialog(I18N.DISPLAY.error(), caught.getMessage()); + ied.show(); + } + + @Override + public void onSuccess(String result) { + view.unmask(); + final AutoBean decode = + AutoBeanCodex.decode(factory, PermanentIdRequesDetails.class, result); + final UpdatePermanentIdRequestDialog dialog = new UpdatePermanentIdRequestDialog( + selectedRequest.getStatus(), + decode.as(), + factory); + dialog.setHeadingText(appearance.updateStatus()); + dialog.getOkButton().setText(appearance.update()); + dialog.getOkButton().addSelectHandler(new SelectEvent.SelectHandler() { + + @Override + public void onSelect(SelectEvent event) { + final PermanentIdRequestUpdate update = dialog.getPermanentIdRequestUpdate(); + doUpdateRequest(update); + } + }); + + dialog.show(); + } + }); + } + @Override public void createPermanentId() { if (selectedRequest != null) { @@ -152,7 +192,7 @@ public void onSuccess(String result) { IplantAnnouncer.getInstance() .schedule(new SuccessAnnouncementConfig(appearance.createPermIdSucess())); - //refresh page + //refresh page loadPermIdRequests(); } @@ -160,4 +200,12 @@ public void onSuccess(String result) { } } + @Override + public void getRequestDetails(AsyncCallback callback) { + if (selectedRequest != null) { + view.mask(I18N.DISPLAY.loadingMask()); + prsvc.getRequestDetails(selectedRequest.getId(), callback); + } + + } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/PermanentIdRequestAdminServiceFacade.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/PermanentIdRequestAdminServiceFacade.java index 90d46e402..05c84bb03 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/PermanentIdRequestAdminServiceFacade.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/PermanentIdRequestAdminServiceFacade.java @@ -23,4 +23,6 @@ void updatePermanentIdRequestStatus(String requestId, void createPermanentId(String id, AsyncCallback asyncCallback); + void getRequestDetails(String id, AsyncCallback asyncCallback); + } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/imp/PermanentIdRequestAdminServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/imp/PermanentIdRequestAdminServiceFacadeImpl.java index ccbbaa378..e4491f04b 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/imp/PermanentIdRequestAdminServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/imp/PermanentIdRequestAdminServiceFacadeImpl.java @@ -47,4 +47,11 @@ public void createPermanentId(String requestId, AsyncCallback asyncCallb } + @Override + public void 
getRequestDetails(String id, AsyncCallback asyncCallback) { + String address = PERMID_ADMIN_REQUEST + "/" + id; + final ServiceCallWrapper wrapper = new ServiceCallWrapper(Type.GET, address); + deService.getServiceData(wrapper, asyncCallback); + } + } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java index b2939f448..c36ea7823 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java @@ -7,6 +7,7 @@ import org.iplantc.de.client.services.DiskResourceServiceFacade; import com.google.gwt.resources.client.ImageResource; +import com.google.gwt.user.client.rpc.AsyncCallback; import com.google.gwt.user.client.ui.HasOneWidget; import com.google.gwt.user.client.ui.IsWidget; @@ -70,10 +71,6 @@ public interface PermanentIdRequestViewAppearance { String commentsLbl(); - String updateStatus(); - - String update(); - String request(); } @@ -89,9 +86,13 @@ public interface Presenter { void setSelectedRequest(PermanentIdRequest request); - void updateRequest(PermanentIdRequestUpdate update); + void doUpdateRequest(PermanentIdRequestUpdate update); + + void onUpdateRequest(); void createPermanentId(); + + void getRequestDetails(AsyncCallback callback); } public interface PermanentIdRequestPresenterAppearance { @@ -106,6 +107,10 @@ public interface PermanentIdRequestPresenterAppearance { String statusUpdateFailure(); String statusUpdateSuccess(); + + String updateStatus(); + + String update(); } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java index b1154767b..9e637262b 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java @@ -97,23 +97,7 @@ public void onSelectionChanged(SelectionChangedEvent event) @UiHandler("updateBtn") void onUpdateBtnClicked(SelectEvent event) { - final UpdatePermanentIdRequestDialog dialog = new UpdatePermanentIdRequestDialog(grid.getSelectionModel() - .getSelectedItem(), - presenter, - factory); - - dialog.setHeadingText(appearance.updateStatus()); - dialog.getOkButton().setText(appearance.update()); - dialog.getOkButton().addSelectHandler(new SelectHandler() { - - @Override - public void onSelect(SelectEvent event) { - final PermanentIdRequestUpdate update = dialog.getPermanentIdRequestUpdate(); - presenter.updateRequest(update); - } - }); - - dialog.show(); + presenter.onUpdateRequest(); } @UiHandler("metadataBtn") diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.ui.xml b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.ui.xml index b9d1da324..abf48a4d0 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.ui.xml +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.ui.xml @@ -45,6 +45,7 @@ text="Create 
Permanent Identifier" icon="{appearance.updateIcon}" enabled="false" /> + diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequest.ui.xml b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequest.ui.xml index dc30bbcc4..3e96f429c 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequest.ui.xml +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequest.ui.xml @@ -2,7 +2,8 @@ + xmlns:form="urn:import:com.sencha.gxt.widget.core.client.form" + xmlns:widget="urn:import:org.iplantc.de.commons.client.widgets"> - + @@ -18,10 +19,17 @@ + + + + + + + - + @@ -35,7 +43,7 @@ - + diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialog.java index 8e153c1dd..8268bd8e7 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialog.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialog.java @@ -1,13 +1,16 @@ package org.iplantc.de.admin.desktop.client.permIdRequest.views; -import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.PermanentIdRequestViewAppearance; -import org.iplantc.de.client.models.identifiers.PermanentIdRequest; +import org.iplantc.de.client.models.identifiers.PermanentIdRequesDetails; import org.iplantc.de.client.models.identifiers.PermanentIdRequestAutoBeanFactory; import org.iplantc.de.client.models.identifiers.PermanentIdRequestStatus; import org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; import org.iplantc.de.commons.client.views.dialogs.IPlantDialog; +import org.iplantc.de.commons.client.widgets.IPlantAnchor; +import org.iplantc.de.notifications.client.views.dialogs.RequestHistoryDialog; import com.google.gwt.core.client.GWT; +import com.google.gwt.event.dom.client.ClickEvent; +import com.google.gwt.event.dom.client.ClickHandler; import com.google.gwt.uibinder.client.UiBinder; import com.google.gwt.uibinder.client.UiFactory; import com.google.gwt.uibinder.client.UiField; @@ -17,8 +20,6 @@ import com.sencha.gxt.cell.core.client.form.ComboBoxCell.TriggerAction; import com.sencha.gxt.data.shared.LabelProvider; -import com.sencha.gxt.widget.core.client.event.SelectEvent; -import com.sencha.gxt.widget.core.client.event.SelectEvent.SelectHandler; import com.sencha.gxt.widget.core.client.form.SimpleComboBox; import com.sencha.gxt.widget.core.client.form.TextArea; @@ -36,29 +37,32 @@ interface UpdatePermanentIdRequestUiBinder extends UiBinder statusCombo; @UiField TextArea commentsEditor; - private final PermanentIdRequestAutoBeanFactory factory; - @SuppressWarnings("unused") - private final PermanentIdRequest request; - @SuppressWarnings("unused") - private final PermanentIdRequestView.Presenter presenter; - - public UpdatePermanentIdRequestDialog(final PermanentIdRequest request, - final PermanentIdRequestView.Presenter presenter, - final PermanentIdRequestAutoBeanFactory factory) { + private final PermanentIdRequestAutoBeanFactory factory; + public UpdatePermanentIdRequestDialog(String curr_status, + final PermanentIdRequesDetails details, + PermanentIdRequestAutoBeanFactory factory) { this.factory = factory; - 
this.request = request; - this.presenter = presenter; add(uiBinder.createAndBindUi(this)); - currentStatusLabel.setText(request.getStatus()); - commentsEditor.setHeight(200); - + currentStatusLabel.setText(curr_status); + currentStatusLabel.addClickHandler(new ClickHandler() { + @Override + public void onClick(ClickEvent event) { + RequestHistoryDialog dlg = new RequestHistoryDialog( + details.getRequestor().getUsername() + "-" + details.getType(), + details.getHistory()); + dlg.show(); + } + }); + userEmail.setText(details.getRequestor().getEmail()); } @UiFactory diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequesDetails.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequesDetails.java new file mode 100644 index 000000000..3bf4e68d4 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequesDetails.java @@ -0,0 +1,30 @@ +package org.iplantc.de.client.models.identifiers; + +import org.iplantc.de.client.models.UserBootstrap; +import org.iplantc.de.client.models.requestStatus.RequestHistory; + +import com.google.web.bindery.autobean.shared.AutoBean; + +import java.util.List; + +/** + * + * + * @author sriram + * + */ +public interface PermanentIdRequesDetails { + + String getId(); + + String getType(); + + String getFolder(); + + @AutoBean.PropertyName("requested_by") + UserBootstrap getRequestor(); + + @AutoBean.PropertyName("history") + List getHistory(); + +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestAutoBeanFactory.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestAutoBeanFactory.java index dd767d85a..6c4dc0a0b 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestAutoBeanFactory.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestAutoBeanFactory.java @@ -16,4 +16,6 @@ public interface PermanentIdRequestAutoBeanFactory extends AutoBeanFactory { AutoBean getAllRequests(); AutoBean getStatus(); + + AutoBean getDeatils(); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestStatusHistory.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestStatusHistory.java deleted file mode 100644 index 3db8c081f..000000000 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestStatusHistory.java +++ /dev/null @@ -1,25 +0,0 @@ -package org.iplantc.de.client.models.identifiers; - -import com.google.web.bindery.autobean.shared.AutoBean.PropertyName; - -import java.util.Date; - -/** - * - * - * @author sriram - * - */ -public interface PermanentIdRequestStatusHistory { - - String getStatus(); - - @PropertyName("updated_by") - String getUpdatedBy(); - - @PropertyName("status_date") - Date getStatusDate(); - - String getComments(); - -} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestPresenterDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestPresenterDefaultAppearance.java index 8d588ede7..63329d0a4 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestPresenterDefaultAppearance.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestPresenterDefaultAppearance.java @@ 
-47,4 +47,15 @@ public String statusUpdateSuccess() { return displayStrings.statusUpdateSuccess(); } + @Override + public String updateStatus() { + return displayStrings.updateStatus(); + } + + @Override + public String update() { + return displayStrings.update(); + } + + } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java index 162fa075a..6b9f1de37 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java @@ -123,16 +123,6 @@ public String commentsLbl() { return displayStrings.commentsLbl(); } - @Override - public String updateStatus() { - return displayStrings.updateStatus(); - } - - @Override - public String update() { - return displayStrings.update(); - } - @Override public String request() { return displayStrings.request(); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/diskResource/toolbar/ToolbarDisplayMessages.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/diskResource/toolbar/ToolbarDisplayMessages.properties index 43d127cd5..1fd0f2bb6 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/diskResource/toolbar/ToolbarDisplayMessages.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/diskResource/toolbar/ToolbarDisplayMessages.properties @@ -43,5 +43,5 @@ importFromCoge = Import Genome from CoGe... selectMetadataFile = Select Metadata File... applyBulkMetadata = Apply Bulk Metadata requestDOI = Request DOI -doiLinkMsg = Please read DOI Manual and check if you meet the requirements for submitting a DOI Reqest.
            Yes, I have read the manual. +doiLinkMsg = Please read DOI Manual and check if you meet the requirements for submitting a DOI Request.
            Yes, I have read the manual. needDOI = I need DOI diff --git a/ui/de-lib/src/test/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenterTest.java b/ui/de-lib/src/test/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenterTest.java index 99fe7d1b3..c1c7b444c 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenterTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenterTest.java @@ -90,7 +90,7 @@ public void testCreatePermanentId_NoRequestSelected() { public void testUpdateRequest() { presenter.setSelectedRequest(mockSelectedRequest); when(mockSelectedRequest.getId()).thenReturn("101010101"); - presenter.updateRequest(mockRequestUpdate); + presenter.doUpdateRequest(mockRequestUpdate); verify(mockPrFacade).updatePermanentIdRequestStatus(mockSelectedRequest.getId(), mockRequestUpdate, stirngCallbackCaptor.capture()); @@ -100,7 +100,7 @@ public void testUpdateRequest() { public void testUpdateRequest_nullUpdate() { presenter.setSelectedRequest(mockSelectedRequest); when(mockSelectedRequest.getId()).thenReturn("101010101"); - presenter.updateRequest(null); + presenter.doUpdateRequest(null); verifyZeroInteractions(mockPrFacade); } From 2aba15bc6db9195250506ec23875f43fb3b1cb2d Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Mon, 22 Feb 2016 13:42:45 -0700 Subject: [PATCH 039/183] CORE-7512 address PR comments. --- .../PermanentIdRequestPresenter.java | 28 +++++------ .../PermanentIdRequestAdminServiceFacade.java | 6 ++- ...manentIdRequestAdminServiceFacadeImpl.java | 46 +++++++++++++++++-- .../views/PermanentIdRequestView.java | 5 +- .../views/UpdatePermanentIdRequest.ui.xml | 2 +- .../views/UpdatePermanentIdRequestDialog.java | 4 +- .../PermanentIdRequestAutoBeanFactory.java | 2 +- ...ls.java => PermanentIdRequestDetails.java} | 2 +- .../PermIdRequestDisplayStrings.java | 2 + .../PermIdRequestDisplayStrings.properties | 1 + ...rmanentIdRequestViewDefaultAppearance.java | 5 ++ 11 files changed, 74 insertions(+), 29 deletions(-) rename ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/{PermanentIdRequesDetails.java => PermanentIdRequestDetails.java} (91%) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java index 96c0392e9..bbb31033e 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java @@ -5,9 +5,9 @@ import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.PermanentIdRequestPresenterAppearance; import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.Presenter; import org.iplantc.de.admin.desktop.client.permIdRequest.views.UpdatePermanentIdRequestDialog; -import org.iplantc.de.client.models.identifiers.PermanentIdRequesDetails; import org.iplantc.de.client.models.identifiers.PermanentIdRequest; import org.iplantc.de.client.models.identifiers.PermanentIdRequestAutoBeanFactory; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestDetails; import 
org.iplantc.de.client.models.identifiers.PermanentIdRequestList; import org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; import org.iplantc.de.client.services.DiskResourceServiceFacade; @@ -20,8 +20,6 @@ import com.google.gwt.user.client.rpc.AsyncCallback; import com.google.gwt.user.client.ui.HasOneWidget; import com.google.inject.Inject; -import com.google.web.bindery.autobean.shared.AutoBean; -import com.google.web.bindery.autobean.shared.AutoBeanCodex; import com.sencha.gxt.widget.core.client.event.SelectEvent; @@ -79,7 +77,7 @@ public void go(HasOneWidget container) { @Override public void getPermIdRequests() { view.mask(I18N.DISPLAY.loadingMask()); - prsvc.getPermanentIdRequests(new AsyncCallback() { + prsvc.getPermanentIdRequests(new AsyncCallback() { @Override public void onFailure(Throwable caught) { @@ -89,12 +87,9 @@ public void onFailure(Throwable caught) { } @Override - public void onSuccess(String result) { - view.unmask(); - final AutoBean decode = - AutoBeanCodex.decode(factory, PermanentIdRequestList.class, result); - - view.loadRequests(decode.as().getRequests()); + public void onSuccess(PermanentIdRequestList result) { + view.unmask(); + view.loadRequests(result.getRequests()); } }); @@ -137,7 +132,7 @@ public void onSuccess(String result) { @Override public void onUpdateRequest() { - getRequestDetails(new AsyncCallback() { + getRequestDetails(new AsyncCallback() { @Override public void onFailure(Throwable caught) { view.unmask(); @@ -146,13 +141,11 @@ public void onFailure(Throwable caught) { } @Override - public void onSuccess(String result) { + public void onSuccess(PermanentIdRequestDetails result) { view.unmask(); - final AutoBean decode = - AutoBeanCodex.decode(factory, PermanentIdRequesDetails.class, result); final UpdatePermanentIdRequestDialog dialog = new UpdatePermanentIdRequestDialog( selectedRequest.getStatus(), - decode.as(), + result, factory); dialog.setHeadingText(appearance.updateStatus()); dialog.getOkButton().setText(appearance.update()); @@ -182,7 +175,8 @@ public void onFailure(Throwable caught) { loadPermIdRequests(); IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.createPermIdFailure())); - IplantErrorDialog ied = new IplantErrorDialog(I18N.DISPLAY.error(), caught.getMessage()); + IplantErrorDialog ied = + new IplantErrorDialog(I18N.DISPLAY.error(), caught.getMessage()); ied.show(); } @@ -201,7 +195,7 @@ public void onSuccess(String result) { } @Override - public void getRequestDetails(AsyncCallback callback) { + public void getRequestDetails(AsyncCallback callback) { if (selectedRequest != null) { view.mask(I18N.DISPLAY.loadingMask()); prsvc.getRequestDetails(selectedRequest.getId(), callback); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/PermanentIdRequestAdminServiceFacade.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/PermanentIdRequestAdminServiceFacade.java index 05c84bb03..d0e203198 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/PermanentIdRequestAdminServiceFacade.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/PermanentIdRequestAdminServiceFacade.java @@ -1,5 +1,7 @@ package org.iplantc.de.admin.desktop.client.permIdRequest.service; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestDetails; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestList; import 
org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; import com.google.gwt.user.client.rpc.AsyncCallback; @@ -15,7 +17,7 @@ public interface PermanentIdRequestAdminServiceFacade { public final String PERMID_ADMIN_REQUEST = "org.iplantc.services.admin.permIdRequests"; - void getPermanentIdRequests(AsyncCallback callback); + void getPermanentIdRequests(AsyncCallback callback); void updatePermanentIdRequestStatus(String requestId, PermanentIdRequestUpdate status, @@ -23,6 +25,6 @@ void updatePermanentIdRequestStatus(String requestId, void createPermanentId(String id, AsyncCallback asyncCallback); - void getRequestDetails(String id, AsyncCallback asyncCallback); + void getRequestDetails(String id, AsyncCallback asyncCallback); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/imp/PermanentIdRequestAdminServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/imp/PermanentIdRequestAdminServiceFacadeImpl.java index e4491f04b..331d30e5b 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/imp/PermanentIdRequestAdminServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/service/imp/PermanentIdRequestAdminServiceFacadeImpl.java @@ -1,13 +1,18 @@ package org.iplantc.de.admin.desktop.client.permIdRequest.service.imp; import org.iplantc.de.admin.desktop.client.permIdRequest.service.PermanentIdRequestAdminServiceFacade; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestAutoBeanFactory; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestDetails; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestList; import org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; +import org.iplantc.de.client.services.converters.AsyncCallbackConverter; import org.iplantc.de.shared.services.BaseServiceCallWrapper.Type; import org.iplantc.de.shared.services.DiscEnvApiService; import org.iplantc.de.shared.services.ServiceCallWrapper; import com.google.gwt.user.client.rpc.AsyncCallback; import com.google.inject.Inject; +import com.google.web.bindery.autobean.shared.AutoBean; import com.google.web.bindery.autobean.shared.AutoBeanCodex; import com.google.web.bindery.autobean.shared.AutoBeanUtils; import com.google.web.bindery.autobean.shared.Splittable; @@ -19,14 +24,47 @@ */ public class PermanentIdRequestAdminServiceFacadeImpl implements PermanentIdRequestAdminServiceFacade { + private class DOIRquestDetailsCallbackConverter + extends AsyncCallbackConverter { + + public DOIRquestDetailsCallbackConverter(AsyncCallback callback) { + super(callback); + } + + @Override + protected PermanentIdRequestDetails convertFrom(String result) { + final AutoBean decode = + AutoBeanCodex.decode(factory, PermanentIdRequestDetails.class, result); + return decode.as(); + } + } + + private class DOIRequestsCallbackConverter extends AsyncCallbackConverter { + public DOIRequestsCallbackConverter(AsyncCallback callback) { + super(callback); + } + + @Override + protected PermanentIdRequestList convertFrom(String result) { + final AutoBean decode = + AutoBeanCodex.decode(factory, PermanentIdRequestList.class, result); + return decode.as(); + } + + + } + @Inject private DiscEnvApiService deService; + @Inject + private PermanentIdRequestAutoBeanFactory factory; + @Override - public void getPermanentIdRequests(AsyncCallback callback) { + public void getPermanentIdRequests(AsyncCallback callback) 
{ String address = PERMID_ADMIN_REQUEST; final ServiceCallWrapper wrapper = new ServiceCallWrapper(Type.GET, address); - deService.getServiceData(wrapper, callback); + deService.getServiceData(wrapper, new DOIRequestsCallbackConverter(callback)); } @Override @@ -48,10 +86,10 @@ public void createPermanentId(String requestId, AsyncCallback asyncCallb } @Override - public void getRequestDetails(String id, AsyncCallback asyncCallback) { + public void getRequestDetails(String id, AsyncCallback asyncCallback) { String address = PERMID_ADMIN_REQUEST + "/" + id; final ServiceCallWrapper wrapper = new ServiceCallWrapper(Type.GET, address); - deService.getServiceData(wrapper, asyncCallback); + deService.getServiceData(wrapper, new DOIRquestDetailsCallbackConverter(asyncCallback)); } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java index c36ea7823..8a408cbce 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java @@ -3,6 +3,7 @@ import org.iplantc.de.client.models.IsMaskable; import org.iplantc.de.client.models.diskResources.Folder; import org.iplantc.de.client.models.identifiers.PermanentIdRequest; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestDetails; import org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; import org.iplantc.de.client.services.DiskResourceServiceFacade; @@ -72,6 +73,8 @@ public interface PermanentIdRequestViewAppearance { String commentsLbl(); String request(); + + String userEmail(); } public interface Presenter { @@ -92,7 +95,7 @@ public interface Presenter { void createPermanentId(); - void getRequestDetails(AsyncCallback callback); + void getRequestDetails(AsyncCallback callback); } public interface PermanentIdRequestPresenterAppearance { diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequest.ui.xml b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequest.ui.xml index 3e96f429c..47e05b87e 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequest.ui.xml +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequest.ui.xml @@ -20,7 +20,7 @@ - + diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialog.java index 8268bd8e7..5d6a42616 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialog.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialog.java @@ -1,6 +1,6 @@ package org.iplantc.de.admin.desktop.client.permIdRequest.views; -import org.iplantc.de.client.models.identifiers.PermanentIdRequesDetails; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestDetails; import org.iplantc.de.client.models.identifiers.PermanentIdRequestAutoBeanFactory; import org.iplantc.de.client.models.identifiers.PermanentIdRequestStatus; import 
org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; @@ -48,7 +48,7 @@ interface UpdatePermanentIdRequestUiBinder extends UiBinder getStatus(); - AutoBean getDeatils(); + AutoBean getDetails(); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequesDetails.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestDetails.java similarity index 91% rename from ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequesDetails.java rename to ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestDetails.java index 3bf4e68d4..9dac01b22 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequesDetails.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequestDetails.java @@ -13,7 +13,7 @@ * @author sriram * */ -public interface PermanentIdRequesDetails { +public interface PermanentIdRequestDetails { String getId(); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.java index 6973a6892..f3050b207 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.java @@ -52,4 +52,6 @@ public interface PermIdRequestDisplayStrings extends Messages { String statusUpdateFailure(); String statusUpdateSuccess(); + + String userEmail(); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.properties index c722f4fcc..65176f041 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.properties @@ -23,3 +23,4 @@ metadataSaveError = Metadata invlid. Please fix the errors and try again! requestLoadFailure = Unable to load permanentId requests! statusUpdateFailure = Unable to update the status of this request! statusUpdateSuccess = Request updated! +userEmail = Email diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java index 6b9f1de37..71888ff97 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java @@ -128,4 +128,9 @@ public String request() { return displayStrings.request(); } + @Override + public String userEmail() { + return displayStrings.userEmail(); + } + } From ab050106416bbc75f6b927cefb47ddfe70239e41 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 22 Feb 2016 13:51:12 -0700 Subject: [PATCH 040/183] Remove mistakenly-preserved commas. 
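The stray commas sat between the last CTE of a WITH list and the main SELECT,
which makes the parser expect another CTE definition and reject the whole
statement. A minimal sketch of the failure mode (not the actual
folder-listing queries):

    WITH user_groups AS ( SELECT ... ),   -- trailing comma: syntax error
    SELECT ...

    WITH user_groups AS ( SELECT ... )    -- fixed
    SELECT ...
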
--- libs/clj-icat-direct/src/clj_icat_direct/queries.clj | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj index a6bf1ee88..da5137b94 100644 --- a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj +++ b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj @@ -546,7 +546,7 @@ "WITH user_groups AS ( SELECT g.group_user_id FROM r_user_main u JOIN r_user_group g ON g.user_id = u.user_id WHERE u.user_name = ? - AND u.zone_name = ? ), + AND u.zone_name = ? ) SELECT DISTINCT c.parent_coll_name as dir_name, @@ -593,7 +593,7 @@ FROM r_user_main u JOIN r_user_group g ON g.user_id = u.user_id WHERE u.user_name = ? - AND u.zone_name = ? ), + AND u.zone_name = ? ) SELECT count(DISTINCT c.coll_id) FROM r_coll_main c JOIN r_objt_access a ON c.coll_id = a.object_id From cac46d7af05414b963b83b82091f59ae7b376c27 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 22 Feb 2016 14:07:59 -0700 Subject: [PATCH 041/183] Reinstate ANY(ARRAY(...)) for consistency. --- libs/clj-icat-direct/src/clj_icat_direct/queries.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj index da5137b94..11d77950e 100644 --- a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj +++ b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj @@ -581,7 +581,7 @@ data_objs AS ( SELECT data_id FROM r_data_main - WHERE coll_id IN ( SELECT coll_id FROM parent )) + WHERE coll_id = ANY(ARRAY( SELECT coll_id FROM parent )) ) SELECT count(DISTINCT d.data_id) FROM r_objt_access a JOIN data_objs d ON a.object_id = d.data_id From f8f28320b31c62f39fbda5abd1729e532193c5c0 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 22 Feb 2016 14:34:56 -0700 Subject: [PATCH 042/183] Remove DISTINCT on coll_id -- it's not database-enforced unique but we have bigger problems if it's not actually unique than this count being wrong. --- libs/clj-icat-direct/src/clj_icat_direct/queries.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj index 11d77950e..6c44c854c 100644 --- a/libs/clj-icat-direct/src/clj_icat_direct/queries.clj +++ b/libs/clj-icat-direct/src/clj_icat_direct/queries.clj @@ -523,7 +523,7 @@ WHERE u.user_name = ? AND u.zone_name = ? ), - parent AS ( SELECT DISTINCT coll_id, coll_name from r_coll_main + parent AS ( SELECT coll_id, coll_name from r_coll_main WHERE coll_name = ? OR coll_name LIKE ? || '/%' ), From a685f1244aa4dd4655eb79d8c0d588889ab2bc9b Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 22 Feb 2016 13:51:39 -0700 Subject: [PATCH 043/183] CORE-4345 Fix Notification DeleteAll query string category.toString returns, for example, 'Tool Request', which after it gets encoded becomes 'tool+request' which is the expected input for the Notification service's filter parameter. category.name returns 'TOOLREQUEST', which after it gets encoded becomes 'toolrequest' which won't be handled properly by the service. 
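An illustrative sketch of the two encodings (the TOOLREQUEST constant name is
an assumption; the encoded values are the ones described above):

    import com.google.gwt.http.client.URL;

    NotificationCategory category = NotificationCategory.TOOLREQUEST;
    category.toString();                                       // "Tool Request"
    URL.encodeQueryString(category.toString().toLowerCase());  // "tool+request" -- accepted by the filter
    category.name();                                           // "TOOLREQUEST"
    URL.encodeQueryString(category.name().toLowerCase());      // "toolrequest"  -- not recognized
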
--- .../de/client/services/impl/MessageServiceFacadeImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java index 5c08ca990..6b3c5e6d2 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/MessageServiceFacadeImpl.java @@ -146,7 +146,7 @@ public void deleteAll(NotificationCategory category, AsyncCallback callb String address = deProperties.getMuleServiceBaseUrl() + "notifications/delete-all"; //$NON-NLS-1$ if (NotificationCategory.ALL != category) { - address += "?filter=" + URL.encodeQueryString(category.name().toLowerCase()); + address += "?filter=" + URL.encodeQueryString(category.toString().toLowerCase()); } ServiceCallWrapper wrapper = new ServiceCallWrapper(DELETE, address); From 0ba23b3bfbfb5b94872d824e68915510cfc6d8ff Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 23 Feb 2016 09:52:31 -0700 Subject: [PATCH 044/183] CORE-7522: require at least one path in the Paths schema. --- services/data-info/src/data_info/routes/domain/common.clj | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/services/data-info/src/data_info/routes/domain/common.clj b/services/data-info/src/data_info/routes/domain/common.clj index 8fd49a30a..8c5cd555a 100644 --- a/services/data-info/src/data_info/routes/domain/common.clj +++ b/services/data-info/src/data_info/routes/domain/common.clj @@ -11,18 +11,16 @@ (def DataIdPathParam (describe UUID "The data item's UUID")) (s/defschema Paths - {:paths (describe [NonBlankString] "A list of iRODS paths")}) + {:paths (describe [(s/one NonBlankString "path") NonBlankString] "A list of iRODS paths")}) (s/defschema OptionalPaths - (-> Paths - (->optional-param :paths))) + {(s/optional-key :paths) (describe [NonBlankString] "A list of iRODS paths")}) (s/defschema DataIds {:ids (describe [UUID] "A list of iRODS data-object UUIDs")}) (s/defschema OptionalPathsOrDataIds - (-> (merge DataIds Paths) - (->optional-param :paths) + (-> (merge DataIds OptionalPaths) (->optional-param :ids))) (def ValidInfoTypesEnum (apply s/enum (hm/supported-formats))) From e19025825360816a4fadc274c2cb0805bb324885 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 23 Feb 2016 11:12:34 -0700 Subject: [PATCH 045/183] CORE-7512 fix tests. 
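The current-status label in the update dialog is now an IPlantAnchor, so
IPlantAnchor gains a getText() accessor the tests can stub and verify. A
minimal sketch of the new accessor pair (widget construction assumed to run
under a GWT-mocking test runner, as in the updated test below):

    IPlantAnchor anchor = new IPlantAnchor("Submitted");
    anchor.getText();           // "Submitted"
    anchor.setText("Approved");
    anchor.getText();           // "Approved"
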
--- .../commons/client/widgets/IPlantAnchor.java | 10 ++++++ .../UpdatePermanentIdRequestDialogTest.java | 35 +++++++++++++------ 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/commons/client/widgets/IPlantAnchor.java b/ui/de-lib/src/main/java/org/iplantc/de/commons/client/widgets/IPlantAnchor.java index f9e396719..a94d2f39a 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/commons/client/widgets/IPlantAnchor.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/commons/client/widgets/IPlantAnchor.java @@ -25,10 +25,13 @@ public static interface IPlantAnchorAppearance { void onUpdateText(XElement element, String text); void render(SafeHtmlBuilder sb); + } private final IPlantAnchorAppearance appearance; + private String text; + @UiConstructor public IPlantAnchor(String text) { this(text, -1); @@ -44,6 +47,7 @@ public IPlantAnchor(String text, int width) { * @param text text to display */ public IPlantAnchor(String text, int width, IPlantAnchorAppearance appearance) { + this.text = text; this.appearance = appearance; SafeHtmlBuilder sb = new SafeHtmlBuilder(); this.appearance.render(sb); @@ -66,6 +70,12 @@ public HandlerRegistration addClickHandler(ClickHandler handler) { } public void setText(String text) { + this.text = text; appearance.onUpdateText(getElement(), text); } + + public String getText() { + return text; + } + } diff --git a/ui/de-lib/src/test/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialogTest.java b/ui/de-lib/src/test/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialogTest.java index 0bccc2691..885f6fef2 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialogTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/UpdatePermanentIdRequestDialogTest.java @@ -1,23 +1,24 @@ package org.iplantc.de.admin.desktop.client.permIdRequest.views; -import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.PermanentIdRequestViewAppearance; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import org.iplantc.de.client.models.UserBootstrap; import org.iplantc.de.client.models.identifiers.PermanentIdRequest; import org.iplantc.de.client.models.identifiers.PermanentIdRequestAutoBeanFactory; +import org.iplantc.de.client.models.identifiers.PermanentIdRequestDetails; import org.iplantc.de.client.models.identifiers.PermanentIdRequestStatus; import org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; +import org.iplantc.de.commons.client.widgets.IPlantAnchor; import com.google.gwt.user.client.ui.Label; -import com.google.gwtmockito.GwtMockitoTestRunner; import com.google.gwtmockito.GxtMockitoTestRunner; import com.google.web.bindery.autobean.shared.AutoBean; -import com.sencha.gxt.widget.core.client.button.TextButton; import com.sencha.gxt.widget.core.client.form.SimpleComboBox; import com.sencha.gxt.widget.core.client.form.TextArea; -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.*; - import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -27,7 +28,10 @@ public class UpdatePermanentIdRequestDialogTest { @Mock - Label mockCurrentStatusLabel; + IPlantAnchor mockCurrentStatusLabel; + + @Mock + Label mockUserEmail; @Mock SimpleComboBox mockStatusCombo; @@ -50,17 +54,25 @@ 
public class UpdatePermanentIdRequestDialogTest { @Mock AutoBean mockAutoBeanStatus; + @Mock + PermanentIdRequestDetails mockDetails; + + @Mock + UserBootstrap mockStrap; + private UpdatePermanentIdRequestDialog dialog; @Before public void setUp() { - dialog = new UpdatePermanentIdRequestDialog(mockRequest, - mockPresenter, + when(mockDetails.getRequestor()).thenReturn(mockStrap); + when(mockStrap.getEmail()).thenReturn("foo@bar.com"); + dialog = new UpdatePermanentIdRequestDialog(PermanentIdRequestStatus.Submitted.toString(), + mockDetails, mockPrfactory); dialog.commentsEditor = mockCommentsEditor; dialog.currentStatusLabel = mockCurrentStatusLabel; dialog.statusCombo = mockStatusCombo; - + dialog.userEmail = mockUserEmail; } @@ -69,6 +81,9 @@ public void testGetPermanentIdRequestUpdate() { when(mockStatusCombo.getCurrentValue()).thenReturn(PermanentIdRequestStatus.Approved); when(mockCommentsEditor.getValue()).thenReturn("testing"); when(mockCurrentStatusLabel.getText()).thenReturn(PermanentIdRequestStatus.Submitted.toString()); + when(mockUserEmail.getText()).thenReturn("foo@bar.com"); + + when(mockPrfactory.getStatus()).thenReturn(mockAutoBeanStatus); when(mockAutoBeanStatus.as()).thenReturn(mockStatusUpdate); final PermanentIdRequestUpdate pru = mock(PermanentIdRequestUpdate.class); From 4be9d20c0c2c2c76ab0c7acb18634295cfcad9f8 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 23 Feb 2016 15:00:22 -0700 Subject: [PATCH 046/183] CORE-7512 organize imports. From a830a3c6c26d58a55448c9667a84ebcb5c591478 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Thu, 11 Feb 2016 14:49:33 -0700 Subject: [PATCH 047/183] CORE-6341 Move NotificationMessage model --- .../client/views/widgets/UnseenNotificationsView.java | 4 ++-- .../desktop/client/views/windows/NotificationWindow.java | 9 --------- .../{views => model}/NotificationMessageProperties.java | 5 ++++- 3 files changed, 6 insertions(+), 12 deletions(-) rename ui/de-lib/src/main/java/org/iplantc/de/notifications/client/{views => model}/NotificationMessageProperties.java (79%) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/UnseenNotificationsView.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/UnseenNotificationsView.java index 790de4149..98f40c9a3 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/UnseenNotificationsView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/widgets/UnseenNotificationsView.java @@ -3,7 +3,7 @@ import org.iplantc.de.desktop.client.DesktopView; import org.iplantc.de.client.models.notifications.NotificationMessage; import org.iplantc.de.commons.client.widgets.IPlantAnchor; -import org.iplantc.de.notifications.client.views.NotificationMessageProperties; +import org.iplantc.de.notifications.client.model.NotificationMessageProperties; import com.google.gwt.cell.client.Cell; import com.google.gwt.core.client.GWT; @@ -165,4 +165,4 @@ void onSeeAllNotificationsSelected(ClickEvent event) { public void onMarkAllSeenClicked(ClickEvent event) { presenter.doMarkAllSeen(true); } -} \ No newline at end of file +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java index b26d02b10..180240400 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java +++ 
b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java @@ -36,15 +36,6 @@ */ public class NotificationWindow extends IplantWindowBase { - private class NotificationKeyProvider implements ModelKeyProvider { - - @Override - public String getKey(NotificationMessage item) { - return item.getId(); - } - - } - private static CheckBoxSelectionModel checkBoxModel; private final IplantDisplayStrings displayStrings; private NotificationView.Presenter presenter; diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationMessageProperties.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/model/NotificationMessageProperties.java similarity index 79% rename from ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationMessageProperties.java rename to ui/de-lib/src/main/java/org/iplantc/de/notifications/client/model/NotificationMessageProperties.java index 4b034c76e..cfa9a1dc7 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationMessageProperties.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/model/NotificationMessageProperties.java @@ -1,12 +1,13 @@ /** * */ -package org.iplantc.de.notifications.client.views; +package org.iplantc.de.notifications.client.model; import org.iplantc.de.client.models.notifications.NotificationCategory; import org.iplantc.de.client.models.notifications.NotificationMessage; import com.sencha.gxt.core.client.ValueProvider; +import com.sencha.gxt.data.shared.ModelKeyProvider; import com.sencha.gxt.data.shared.PropertyAccess; /** @@ -15,6 +16,8 @@ */ public interface NotificationMessageProperties extends PropertyAccess { + ModelKeyProvider id(); + ValueProvider category(); ValueProvider message(); From 983060a680e9d15bd02b1794dfb5ab2821f14bf2 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Thu, 11 Feb 2016 14:56:23 -0700 Subject: [PATCH 048/183] CORE-6341 Move UI components out of NotificationWindow --- .../views/windows/NotificationWindow.java | 55 +------------------ .../client/views/NotificationViewImpl.java | 54 ++++++++++++++++++ 2 files changed, 55 insertions(+), 54 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java index 180240400..51d4cc3a4 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java @@ -52,11 +52,7 @@ public class NotificationWindow extends IplantWindowBase { public void show(C windowConfig, String tag, boolean isMaximizable) { NotifyWindowConfig notifyWindowConfig = (NotifyWindowConfig) windowConfig; - NotificationKeyProvider keyProvider = new NotificationKeyProvider(); - ListStore store = new ListStore<>(keyProvider); - ColumnModel cm = buildNotificationColumnModel(); - NotificationView view = new NotificationViewImpl(store, cm, checkBoxModel); - presenter = new NotificationPresenterImpl(view); + presenter.go(this); if (notifyWindowConfig != null) { presenter.filterBy(notifyWindowConfig.getSortCategory()); @@ -70,53 +66,4 @@ public WindowState getWindowState() { return createWindowState(config); } - @SuppressWarnings("unchecked") - private ColumnModel buildNotificationColumnModel() { - NotificationMessageProperties props = GWT.create(NotificationMessageProperties.class); - List> 
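The grid's ColumnModel is now assembled inside NotificationViewImpl through a
UiBinder factory method instead of being built by the window and handed in.
UiBinder calls a @UiFactory method whenever the template needs an instance of
that method's return type, so the view now owns its columns. A sketch of the
hook (the full implementation is in the diff below):

    @UiFactory
    ColumnModel<NotificationMessage> createColumnModel() {
        // builds the checkbox, category, message, and timestamp columns
    }
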
configs = new LinkedList<>(); - - checkBoxModel = new CheckBoxSelectionModel<>(new IdentityValueProvider()); - @SuppressWarnings("rawtypes") - ColumnConfig colCheckBox = checkBoxModel.getColumn(); - configs.add(colCheckBox); - - ColumnConfig colCategory = new ColumnConfig<>(props.category(), 100); - colCategory.setHeader(displayStrings.category()); - configs.add(colCategory); - colCategory.setMenuDisabled(true); - colCategory.setSortable(false); - - ColumnConfig colMessage = new ColumnConfig<>(new IdentityValueProvider(), 420); - colMessage.setHeader(displayStrings.messagesGridHeader()); - colMessage.setCell(new NotificationMessageCell()); - configs.add(colMessage); - colMessage.setSortable(false); - colMessage.setMenuDisabled(true); - - ColumnConfig colTimestamp = new ColumnConfig<>(new ValueProvider() { - - @Override - public Date getValue(NotificationMessage object) { - return new Date(object.getTimestamp()); - } - - @Override - public void setValue(NotificationMessage object, - Date value) { - // do nothing - } - - @Override - public String getPath() { - return "timestamp"; - } - }, 170); - colTimestamp.setCell(new DateCell(DateTimeFormat - .getFormat(DateTimeFormat.PredefinedFormat.DATE_TIME_MEDIUM))); - colTimestamp.setHeader(displayStrings.createdDateGridHeader()); - - configs.add(colTimestamp); - return new ColumnModel<>(configs); - } - } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java index 689adc934..13a2505b5 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java @@ -185,4 +185,58 @@ public void unmask() { public TextButton getRefreshButton() { return toolBar.getRefreshButton(); } + + + @UiFactory + ColumnModel createColumnModel() { + NotificationMessageProperties props = GWT.create(NotificationMessageProperties.class); + List> configs = new LinkedList<>(); + + checkBoxModel = + new CheckBoxSelectionModel<>(new IdentityValueProvider()); + @SuppressWarnings("rawtypes") + ColumnConfig colCheckBox = checkBoxModel.getColumn(); + configs.add(colCheckBox); + + ColumnConfig colCategory = + new ColumnConfig<>(props.category(), + appearance.categoryColumnWidth(), + appearance.category()); + configs.add(colCategory); + colCategory.setMenuDisabled(true); + colCategory.setSortable(false); + + ColumnConfig colMessage = + new ColumnConfig<>(new IdentityValueProvider(), + appearance.messagesColumnWidth(), + appearance.messagesGridHeader()); + colMessage.setCell(new NotificationMessageCell()); + configs.add(colMessage); + colMessage.setSortable(false); + colMessage.setMenuDisabled(true); + + ColumnConfig colTimestamp = new ColumnConfig<>(new ValueProvider() { + + @Override + public Date getValue(NotificationMessage object) { + return new Date(object.getTimestamp()); + } + + @Override + public void setValue(NotificationMessage object, + Date value) { + // do nothing + } + + @Override + public String getPath() { + return "timestamp"; + } + }, appearance.createdDateColumnWidth(), appearance.createdDateGridHeader()); + colTimestamp.setCell(new DateCell(DateTimeFormat + .getFormat(DateTimeFormat.PredefinedFormat.DATE_TIME_MEDIUM))); + + configs.add(colTimestamp); + return new ColumnModel<>(configs); + } } From 65a3a44435dff646ad7c917784a274b7556a8d20 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 
22 Feb 2016 11:22:06 -0700 Subject: [PATCH 049/183] CORE-6341 Implement Notification's Appearance pattern with dependency injection --- .../views/windows/NotificationWindow.java | 33 ++------- .../presenter/NotificationPresenterImpl.java | 44 ++++++------ .../views/NotificationToolbarViewImpl.java | 15 ++-- .../client/views/NotificationView.java | 26 +++++-- .../client/views/NotificationViewImpl.java | 62 ++++++++-------- .../views/cells/NotificationMessageCell.java | 28 ++++---- .../client/views/cells/RequestStatusCell.java | 36 ++++++---- .../org/iplantc/de/theme/base/Base.gwt.xml | 1 + .../NotificationViewDefaultAppearance.java | 72 +++++++++++++++++++ .../notifications/Notifications.gwt.xml | 15 ++++ ...ificationMessageCellDefaultAppearance.java | 27 +++++++ .../RequestStatusCellDefaultAppearance.java | 25 +++++++ 12 files changed, 268 insertions(+), 116 deletions(-) create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/NotificationViewDefaultAppearance.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/Notifications.gwt.xml create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/cells/NotificationMessageCellDefaultAppearance.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/cells/RequestStatusCellDefaultAppearance.java diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java index 51d4cc3a4..c6a7c0496 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/NotificationWindow.java @@ -1,49 +1,28 @@ package org.iplantc.de.desktop.client.views.windows; import org.iplantc.de.client.models.WindowState; -import org.iplantc.de.client.models.notifications.NotificationCategory; -import org.iplantc.de.client.models.notifications.NotificationMessage; import org.iplantc.de.commons.client.views.window.configs.ConfigFactory; import org.iplantc.de.commons.client.views.window.configs.NotifyWindowConfig; import org.iplantc.de.commons.client.views.window.configs.WindowConfig; import org.iplantc.de.desktop.shared.DeModule; -import org.iplantc.de.notifications.client.presenter.NotificationPresenterImpl; -import org.iplantc.de.notifications.client.views.NotificationMessageProperties; import org.iplantc.de.notifications.client.views.NotificationView; -import org.iplantc.de.notifications.client.views.NotificationViewImpl; -import org.iplantc.de.notifications.client.views.cells.NotificationMessageCell; -import org.iplantc.de.resources.client.messages.IplantDisplayStrings; -import com.google.gwt.cell.client.DateCell; -import com.google.gwt.core.shared.GWT; -import com.google.gwt.i18n.client.DateTimeFormat; import com.google.inject.Inject; -import com.sencha.gxt.core.client.IdentityValueProvider; -import com.sencha.gxt.core.client.ValueProvider; -import com.sencha.gxt.data.shared.ListStore; -import com.sencha.gxt.data.shared.ModelKeyProvider; -import com.sencha.gxt.widget.core.client.grid.CheckBoxSelectionModel; -import com.sencha.gxt.widget.core.client.grid.ColumnConfig; -import com.sencha.gxt.widget.core.client.grid.ColumnModel; - -import java.util.Date; -import java.util.LinkedList; -import java.util.List; - /** * @author sriram, jstroot */ public class NotificationWindow extends 
IplantWindowBase { - private static CheckBoxSelectionModel checkBoxModel; - private final IplantDisplayStrings displayStrings; private NotificationView.Presenter presenter; + private NotificationView.NotificationViewAppearance appearance; @Inject - NotificationWindow(final IplantDisplayStrings displayStrings) { - this.displayStrings = displayStrings; - setHeadingText(displayStrings.notifications()); + NotificationWindow(NotificationView.Presenter presenter, + NotificationView.NotificationViewAppearance appearance) { + this.presenter = presenter; + this.appearance = appearance; + setHeadingText(appearance.notifications()); ensureDebugId(DeModule.WindowIds.NOTIFICATION); setSize("600", "375"); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java index d6ca2b8de..17b12ca35 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java @@ -1,7 +1,6 @@ package org.iplantc.de.notifications.client.presenter; import org.iplantc.de.client.events.EventBus; -import org.iplantc.de.client.gin.ServicesInjector; import org.iplantc.de.client.models.HasId; import org.iplantc.de.client.models.notifications.Notification; import org.iplantc.de.client.models.notifications.NotificationCategory; @@ -12,12 +11,10 @@ import org.iplantc.de.commons.client.ErrorHandler; import org.iplantc.de.notifications.client.events.DeleteNotificationsUpdateEvent; import org.iplantc.de.notifications.client.events.NotificationCountUpdateEvent; +import org.iplantc.de.notifications.client.gin.factory.NotificationViewFactory; +import org.iplantc.de.notifications.client.model.NotificationMessageProperties; import org.iplantc.de.notifications.client.views.NotificationToolbarView; -import org.iplantc.de.notifications.client.views.NotificationToolbarViewImpl; import org.iplantc.de.notifications.client.views.NotificationView; -import org.iplantc.de.resources.client.messages.I18N; -import org.iplantc.de.resources.client.messages.IplantDisplayStrings; -import org.iplantc.de.resources.client.messages.IplantErrorStrings; import com.google.common.base.Strings; import com.google.common.collect.Lists; @@ -27,10 +24,12 @@ import com.google.gwt.user.client.Command; import com.google.gwt.user.client.rpc.AsyncCallback; import com.google.gwt.user.client.ui.HasOneWidget; +import com.google.inject.Inject; import com.google.web.bindery.autobean.shared.Splittable; import com.google.web.bindery.autobean.shared.impl.StringQuoter; import com.sencha.gxt.data.client.loader.RpcProxy; +import com.sencha.gxt.data.shared.ListStore; import com.sencha.gxt.data.shared.SortDir; import com.sencha.gxt.data.shared.SortInfo; import com.sencha.gxt.data.shared.SortInfoBean; @@ -110,33 +109,36 @@ public void onSuccess(String result1) { }); } } - - private final IplantDisplayStrings displayStrings; - - private final IplantErrorStrings errorStrings; - EventBus eventBus; MessageServiceFacade messageServiceFacade; - NotificationToolbarView toolbar; + private final ListStore listStore; + private final NotificationToolbarView toolbar; private final NotificationView view; private PagingLoadResult callbackResult; + private NotificationView.NotificationViewAppearance appearance; NotificationCategory currentCategory; private final JsonUtil jsonUtil; - public 
NotificationPresenterImpl(final NotificationView view) { - this.view = view; - this.errorStrings = I18N.ERROR; - this.displayStrings = I18N.DISPLAY; - this.messageServiceFacade = ServicesInjector.INSTANCE.getMessageServiceFacade(); - this.eventBus = EventBus.getInstance(); + @Inject + public NotificationPresenterImpl(final NotificationViewFactory viewFactory, + NotificationView.NotificationViewAppearance appearance, + NotificationToolbarView toolbar, + NotificationMessageProperties messageProperties) { + this.appearance = appearance; + this.listStore = createListStore(messageProperties); + this.view = viewFactory.create(listStore); currentCategory = NotificationCategory.ALL; - toolbar = new NotificationToolbarViewImpl(); this.jsonUtil = JsonUtil.getInstance(); toolbar.setPresenter(this); view.setNorthWidget(toolbar); this.view.setPresenter(this); setRefreshButton(view.getRefreshButton()); // set default cat + + ListStore createListStore(NotificationMessageProperties messageProperties) { + return new ListStore<>(messageProperties.id()); + } + } @Override @@ -243,7 +245,7 @@ public void execute() { messageServiceFacade.deleteMessages(obj, new AsyncCallback() { @Override public void onFailure(Throwable caught) { - ErrorHandler.post(errorStrings.notificationDeletFail(), caught); + ErrorHandler.post(appearance.notificationDeleteFail(), caught); } @Override @@ -274,7 +276,6 @@ public void onNotificationSelection(List items) { @Override public void setRefreshButton(TextButton refreshBtn) { if (refreshBtn != null) { - refreshBtn.setText(displayStrings.refresh()); toolbar.setRefreshButton(refreshBtn); } } @@ -308,7 +309,8 @@ public void load(final FilterPagingLoadConfig loadConfig, final PagingLoader> loader = new PagingLoader<>(proxy); loader.setRemoteSort(true); - loader.addLoadHandler(new LoadResultListStoreBinding>(view.getListStore())); + loader.addLoadHandler(new LoadResultListStoreBinding>( + listStore)); loader.useLoadConfig(buildDefaultLoadConfig()); return loader; } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarViewImpl.java index 915e6b0aa..8b9697286 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarViewImpl.java @@ -12,7 +12,9 @@ import com.google.gwt.uibinder.client.UiField; import com.google.gwt.uibinder.client.UiHandler; import com.google.gwt.uibinder.client.UiTemplate; +import com.google.gwt.user.client.ui.Composite; import com.google.gwt.user.client.ui.Widget; +import com.google.inject.Inject; import com.sencha.gxt.data.shared.StringLabelProvider; import com.sencha.gxt.widget.core.client.button.TextButton; @@ -24,7 +26,7 @@ * @author sriram * */ -public class NotificationToolbarViewImpl implements NotificationToolbarView { +public class NotificationToolbarViewImpl extends Composite implements NotificationToolbarView { private static NotificationToolbarUiBinder uiBinder = GWT.create(NotificationToolbarUiBinder.class); @@ -32,7 +34,6 @@ public class NotificationToolbarViewImpl implements NotificationToolbarView { interface NotificationToolbarUiBinder extends UiBinder { } - private final Widget widget; private Presenter presenter; @UiField @@ -47,9 +48,12 @@ interface NotificationToolbarUiBinder extends UiBinder cboFilter = new SimpleComboBox( new StringLabelProvider()); + private 
NotificationView.NotificationViewAppearance appearance; - public NotificationToolbarViewImpl() { - widget = uiBinder.createAndBindUi(this); + @Inject + public NotificationToolbarViewImpl(NotificationView.NotificationViewAppearance appearance) { + this.appearance = appearance; + initWidget(uiBinder.createAndBindUi(this)); initFilters(); } @@ -75,7 +79,7 @@ public void onSelection(SelectionEvent event) { @Override public Widget asWidget() { - return widget; + return this; } @Override @@ -107,6 +111,7 @@ public void setPresenter(Presenter p) { @Override public void setRefreshButton(TextButton refreshBtn) { + refreshBtn.setText(appearance.refresh()); menuToolBar.insert(refreshBtn, 1); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.java index 0836e6893..76d5cb157 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.java @@ -5,7 +5,6 @@ import com.google.gwt.user.client.ui.IsWidget; -import com.sencha.gxt.data.shared.ListStore; import com.sencha.gxt.data.shared.loader.FilterPagingLoadConfig; import com.sencha.gxt.data.shared.loader.PagingLoadResult; import com.sencha.gxt.data.shared.loader.PagingLoader; @@ -13,7 +12,28 @@ import java.util.List; -public interface NotificationView extends IsWidget { + interface NotificationViewAppearance { + + String notifications(); + + String refresh(); + + String notificationDeleteFail(); + + String category(); + + int categoryColumnWidth(); + + String messagesGridHeader(); + + int messagesColumnWidth(); + + String createdDateGridHeader(); + + int createdDateColumnWidth(); + + } + public interface Presenter extends org.iplantc.de.commons.client.presenter.Presenter { /** * Filters the list of notifications by a given Category. 
@@ -58,8 +78,6 @@ public interface Presenter extends org.iplantc.de.commons.client.presenter.Prese public void setPresenter(final Presenter presenter); - public ListStore getListStore(); - /** * loads notifications using given laod conig * diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java index 13a2505b5..17768bd7a 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java @@ -3,33 +3,46 @@ */ package org.iplantc.de.notifications.client.views; -import org.iplantc.de.commons.client.widgets.DEPagingToolbar; +import org.iplantc.de.client.models.notifications.NotificationCategory; import org.iplantc.de.client.models.notifications.NotificationMessage; +import org.iplantc.de.commons.client.widgets.DEPagingToolbar; +import org.iplantc.de.notifications.client.model.NotificationMessageProperties; +import org.iplantc.de.notifications.client.views.cells.NotificationMessageCell; import org.iplantc.de.resources.client.messages.I18N; +import com.google.gwt.cell.client.DateCell; import com.google.gwt.core.client.GWT; +import com.google.gwt.i18n.client.DateTimeFormat; import com.google.gwt.uibinder.client.UiBinder; +import com.google.gwt.uibinder.client.UiFactory; import com.google.gwt.uibinder.client.UiField; -import com.google.gwt.uibinder.client.UiTemplate; import com.google.gwt.user.client.ui.IsWidget; import com.google.gwt.user.client.ui.Widget; +import com.google.inject.Inject; +import com.google.inject.assistedinject.Assisted; +import com.sencha.gxt.core.client.IdentityValueProvider; import com.sencha.gxt.core.client.Style.SelectionMode; +import com.sencha.gxt.core.client.ValueProvider; import com.sencha.gxt.data.shared.ListStore; import com.sencha.gxt.data.shared.loader.FilterPagingLoadConfig; import com.sencha.gxt.data.shared.loader.PagingLoadResult; import com.sencha.gxt.data.shared.loader.PagingLoader; +import com.sencha.gxt.widget.core.client.Composite; import com.sencha.gxt.widget.core.client.FramedPanel; import com.sencha.gxt.widget.core.client.button.TextButton; import com.sencha.gxt.widget.core.client.container.BorderLayoutContainer; import com.sencha.gxt.widget.core.client.container.BorderLayoutContainer.BorderLayoutData; import com.sencha.gxt.widget.core.client.event.RefreshEvent; +import com.sencha.gxt.widget.core.client.grid.CheckBoxSelectionModel; +import com.sencha.gxt.widget.core.client.grid.ColumnConfig; import com.sencha.gxt.widget.core.client.grid.ColumnModel; import com.sencha.gxt.widget.core.client.grid.Grid; -import com.sencha.gxt.widget.core.client.grid.GridSelectionModel; import com.sencha.gxt.widget.core.client.selection.SelectionChangedEvent; import com.sencha.gxt.widget.core.client.selection.SelectionChangedEvent.SelectionChangedHandler; +import java.util.Date; +import java.util.LinkedList; import java.util.List; /** @@ -39,7 +52,7 @@ * @author sriram * */ -public class NotificationViewImpl implements NotificationView { +public class NotificationViewImpl extends Composite implements NotificationView { private static MyUiBinder uiBinder = GWT.create(MyUiBinder.class); @@ -47,36 +60,25 @@ public class NotificationViewImpl implements NotificationView { interface MyUiBinder extends UiBinder { } - @UiField(provided = true) - final ListStore listStore; - @UiField(provided = true) - final 
ColumnModel cm; - - @UiField - Grid grid; - - @UiField - FramedPanel mainPanel; - - @UiField - BorderLayoutContainer con; - - @UiField - DEPagingToolbar toolBar; - @UiField - BorderLayoutData northData; + @UiField(provided = true) final ListStore listStore; + @UiField Grid grid; + @UiField FramedPanel mainPanel; + @UiField BorderLayoutContainer con; + @UiField DEPagingToolbar toolBar; + @UiField BorderLayoutData northData; - private final Widget widget; - private Presenter presenter; + CheckBoxSelectionModel checkBoxModel; + private NotificationViewAppearance appearance; - public NotificationViewImpl(ListStore listStore, - ColumnModel cm, GridSelectionModel sm) { - this.cm = cm; + @Inject + public NotificationViewImpl(@Assisted ListStore listStore, + NotificationViewAppearance appearance) { this.listStore = listStore; - this.widget = uiBinder.createAndBindUi(this); + this.appearance = appearance; + initWidget(uiBinder.createAndBindUi(this)); toolBar.getElement().getStyle().setProperty("borderBottom", "none"); - grid.setSelectionModel(sm); + grid.setSelectionModel(checkBoxModel); grid.getSelectionModel().setSelectionMode(SelectionMode.MULTI); addGridSelectionHandler(); addGridRefreshHandler(); @@ -109,7 +111,7 @@ public void onSelectionChanged(SelectionChangedEvent event) */ @Override public Widget asWidget() { - return widget; + return this; } /* diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/cells/NotificationMessageCell.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/cells/NotificationMessageCell.java index 1a0844ff5..c1acb1b7b 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/cells/NotificationMessageCell.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/cells/NotificationMessageCell.java @@ -24,6 +24,7 @@ import com.google.common.collect.Lists; import com.google.gwt.cell.client.AbstractCell; +import com.google.gwt.cell.client.Cell; import com.google.gwt.cell.client.ValueUpdater; import com.google.gwt.core.client.GWT; import com.google.gwt.dom.client.Element; @@ -32,8 +33,6 @@ import com.google.web.bindery.autobean.shared.AutoBean; import com.google.web.bindery.autobean.shared.AutoBeanCodex; -import com.sencha.gxt.core.client.util.Format; - import java.util.ArrayList; import java.util.List; @@ -46,26 +45,22 @@ */ public class NotificationMessageCell extends AbstractCell { + public interface NotificationMessageCellAppearance { + void render(Cell.Context context, NotificationMessage value, SafeHtmlBuilder sb); + } + + private final NotificationMessageCellAppearance appearance = + GWT.create(NotificationMessageCellAppearance.class); + private final DiskResourceAutoBeanFactory drFactory = GWT.create(DiskResourceAutoBeanFactory.class); private final AnalysesAutoBeanFactory analysesFactory = GWT.create(AnalysesAutoBeanFactory.class); private final NotificationAutoBeanFactory notificationFactory = GWT.create(NotificationAutoBeanFactory.class); private final DiskResourceUtil diskResourceUtil = DiskResourceUtil.getInstance(); + public NotificationMessageCell() { super("click"); //$NON-NLS-1$ } - @Override - public void render(Context context, NotificationMessage value, SafeHtmlBuilder sb) { - String style = "white-space:pre-wrap;text-overflow:ellipsis;overflow:hidden;"; //$NON-NLS-1$ - - if (value.getContext() != null) { - style += "cursor:pointer; text-decoration:underline;"; //$NON-NLS-1$ - } - - sb.appendHtmlConstant(Format.substitute("
<div style=\"{0}\">{1}</div>
", style, //$NON-NLS-1$ - value.getMessage())); - } - @Override public void onBrowserEvent(Context context, Element parent, NotificationMessage value, NativeEvent event, ValueUpdater valueUpdater) { @@ -153,4 +148,9 @@ public void onBrowserEvent(Context context, Element parent, NotificationMessage } } + @Override + public void render(Context context, NotificationMessage value, SafeHtmlBuilder sb) { + appearance.render(context, value, sb); + } + } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/cells/RequestStatusCell.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/cells/RequestStatusCell.java index 1976f36b9..b414412b9 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/cells/RequestStatusCell.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/cells/RequestStatusCell.java @@ -1,13 +1,11 @@ package org.iplantc.de.notifications.client.views.cells; -import org.iplantc.de.client.models.tool.Tool; import org.iplantc.de.client.models.toolRequest.ToolRequestStatus; import com.google.gwt.cell.client.AbstractCell; +import com.google.gwt.core.client.GWT; import com.google.gwt.safehtml.shared.SafeHtmlBuilder; -import com.sencha.gxt.core.client.util.Format; - /** * A Cell for displaying a ToolRequestStatus in a grid with its associated help text as a QuickTip. * @@ -16,24 +14,32 @@ */ public class RequestStatusCell extends AbstractCell { + public interface RequestStatusCellAppearance { + void render(Context context, String helpText, String value, SafeHtmlBuilder sb); + } + + private RequestStatusCellAppearance appearance = GWT.create(RequestStatusCellAppearance.class); + @Override public void render(Context context, String value, SafeHtmlBuilder sb) { - String qtip = ""; //$NON-NLS-1$ + + String helpText; if (value != null && (value.equalsIgnoreCase(ToolRequestStatus.Completion.toString()) || value.equalsIgnoreCase(ToolRequestStatus.Evaluation.toString()) - || value.equalsIgnoreCase(ToolRequestStatus.Failed.toString()) || value.equalsIgnoreCase(ToolRequestStatus.Installation.toString()) - || value.equalsIgnoreCase(ToolRequestStatus.Pending.toString()) || value.equalsIgnoreCase(ToolRequestStatus.Submitted.toString()) - || value.equalsIgnoreCase(ToolRequestStatus.Validation.toString())) ) { - qtip = Format.substitute("qtip=\"{0}\"", ToolRequestStatus.valueOf(value).getHelpText()); //$NON-NLS-1$ - sb.appendHtmlConstant(Format.substitute("
<span {0}>{1}</span>
", qtip, value)); + || value.equalsIgnoreCase(ToolRequestStatus.Failed.toString()) + || value.equalsIgnoreCase(ToolRequestStatus.Installation.toString()) + || value.equalsIgnoreCase(ToolRequestStatus.Pending.toString()) + || value.equalsIgnoreCase(ToolRequestStatus.Submitted.toString()) || value + .equalsIgnoreCase(ToolRequestStatus.Validation.toString()))) { + helpText = ToolRequestStatus.valueOf(value).getHelpText(); + } else if (value != null) { - qtip = Format.substitute("qtip=\"{0}\"", ToolRequestStatus.valueOf(ToolRequestStatus.Other.toString()).getHelpText()); //$NON-NLS-1$ - sb.appendHtmlConstant(Format.substitute("
<span {0}>{1}</span>
", qtip, value)); - } else{ - sb.appendHtmlConstant(Format.substitute("
<span>{0}</span>
", value)); + helpText = ToolRequestStatus.valueOf(ToolRequestStatus.Other.toString()) + .getHelpText(); + } else { + helpText = null; } - - //$NON-NLS-1$ + appearance.render(context, helpText, value, sb); } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/Base.gwt.xml b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/Base.gwt.xml index 306ab0cd8..9acdf71d5 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/Base.gwt.xml +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/Base.gwt.xml @@ -7,6 +7,7 @@ + diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/NotificationViewDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/NotificationViewDefaultAppearance.java new file mode 100644 index 000000000..7273a39fa --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/NotificationViewDefaultAppearance.java @@ -0,0 +1,72 @@ +package org.iplantc.de.theme.base.client.notifications; + +import org.iplantc.de.notifications.client.views.NotificationView; +import org.iplantc.de.resources.client.messages.IplantDisplayStrings; +import org.iplantc.de.resources.client.messages.IplantErrorStrings; + +import com.google.gwt.core.client.GWT; + +/** + * @author aramsey + */ +public class NotificationViewDefaultAppearance implements NotificationView.NotificationViewAppearance { + + private IplantDisplayStrings iplantDisplayStrings; + private IplantErrorStrings iplantErrorStrings; + + public NotificationViewDefaultAppearance() { + this(GWT.create(IplantDisplayStrings.class), + (GWT.create(IplantErrorStrings.class))); + } + + public NotificationViewDefaultAppearance(IplantDisplayStrings iplantDisplayStrings, + IplantErrorStrings iplantErrorStrings) { + this.iplantDisplayStrings = iplantDisplayStrings; + this.iplantErrorStrings = iplantErrorStrings; + } + + @Override + public String notifications() { + return iplantDisplayStrings.notifications(); + } + + @Override + public String refresh() { + return iplantDisplayStrings.refresh(); + } + + @Override + public String notificationDeleteFail() { + return iplantErrorStrings.notificationDeletFail(); + } + + @Override + public String category() { + return iplantDisplayStrings.category(); + } + + @Override + public int categoryColumnWidth() { + return 100; + } + + @Override + public String messagesGridHeader() { + return iplantDisplayStrings.messagesGridHeader(); + } + + @Override + public int messagesColumnWidth() { + return 420; + } + + @Override + public String createdDateGridHeader() { + return iplantDisplayStrings.createdDateGridHeader(); + } + + @Override + public int createdDateColumnWidth() { + return 170; + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/Notifications.gwt.xml b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/Notifications.gwt.xml new file mode 100644 index 000000000..c95926c00 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/Notifications.gwt.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/cells/NotificationMessageCellDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/cells/NotificationMessageCellDefaultAppearance.java new file mode 100644 index 000000000..6f5915a67 --- /dev/null +++ 
b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/cells/NotificationMessageCellDefaultAppearance.java @@ -0,0 +1,27 @@ +package org.iplantc.de.theme.base.client.notifications.cells; + +import org.iplantc.de.client.models.notifications.NotificationMessage; +import org.iplantc.de.notifications.client.views.cells.NotificationMessageCell; + +import com.google.gwt.cell.client.Cell; +import com.google.gwt.safehtml.shared.SafeHtmlBuilder; + +import com.sencha.gxt.core.client.util.Format; + +/** + * @author aramsey + */ +public class NotificationMessageCellDefaultAppearance implements NotificationMessageCell.NotificationMessageCellAppearance { + + @Override + public void render(Cell.Context context, NotificationMessage value, SafeHtmlBuilder sb) { + String style = "white-space:pre-wrap;text-overflow:ellipsis;overflow:hidden;"; //$NON-NLS-1$ + + if (value.getContext() != null) { + style += "cursor:pointer; text-decoration:underline;"; //$NON-NLS-1$ + } + + sb.appendHtmlConstant(Format.substitute("
<div style=\"{0}\">{1}</div>
", style, //$NON-NLS-1$ + value.getMessage())); + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/cells/RequestStatusCellDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/cells/RequestStatusCellDefaultAppearance.java new file mode 100644 index 000000000..677a8789a --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/notifications/cells/RequestStatusCellDefaultAppearance.java @@ -0,0 +1,25 @@ +package org.iplantc.de.theme.base.client.notifications.cells; + +import org.iplantc.de.notifications.client.views.cells.RequestStatusCell; + +import com.google.gwt.cell.client.Cell; +import com.google.gwt.safehtml.shared.SafeHtmlBuilder; + +import com.sencha.gxt.core.client.util.Format; + +/** + * @author aramsey + */ +public class RequestStatusCellDefaultAppearance implements RequestStatusCell.RequestStatusCellAppearance { + @Override + public void render(Cell.Context context, String helpText, String value, SafeHtmlBuilder sb) { + String qtip = ""; + + if (helpText != null) { + qtip = Format.substitute("qtip=\"{0}\"", helpText); + sb.appendHtmlConstant(Format.substitute("
<div {0}>{1}</div>
", qtip, value)); + } else{ + sb.appendHtmlConstant(Format.substitute("
<div>{0}</div>
", value)); + } + } +} From e95b663582953f03ebd299e0f24e109eee95e88e Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 22 Feb 2016 11:07:26 -0700 Subject: [PATCH 050/183] CORE-6341 Minor file rename --- .../de/notifications/client/views/NotificationViewImpl.java | 6 ++---- ...{NotificationView.ui.xml => NotificationViewImpl.ui.xml} | 0 2 files changed, 2 insertions(+), 4 deletions(-) rename ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/{NotificationView.ui.xml => NotificationViewImpl.ui.xml} (100%) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java index 17768bd7a..1d8897b2b 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java @@ -54,12 +54,10 @@ */ public class NotificationViewImpl extends Composite implements NotificationView { - private static MyUiBinder uiBinder = GWT.create(MyUiBinder.class); - - @UiTemplate("NotificationView.ui.xml") - interface MyUiBinder extends UiBinder { + interface NotificationViewImplUiBinder extends UiBinder { } + private static NotificationViewImplUiBinder uiBinder = GWT.create(NotificationViewImplUiBinder.class); @UiField(provided = true) final ListStore listStore; @UiField Grid grid; diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.ui.xml b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.ui.xml similarity index 100% rename from ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.ui.xml rename to ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.ui.xml From 6be180a4c267f84ec99f4e59360ab29191c28f05 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 22 Feb 2016 12:24:15 -0700 Subject: [PATCH 051/183] CORE-6341 Notification Gin and dependency injection --- .../client/gin/NotificationGinModule.java | 24 +++++++++++++++++++ .../gin/factory/NotificationViewFactory.java | 14 +++++++++++ .../presenter/NotificationPresenterImpl.java | 7 +++--- .../org/iplantc/de/client/gin/DEInjector.java | 4 +++- 4 files changed, 44 insertions(+), 5 deletions(-) create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/notifications/client/gin/NotificationGinModule.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/notifications/client/gin/factory/NotificationViewFactory.java diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/gin/NotificationGinModule.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/gin/NotificationGinModule.java new file mode 100644 index 000000000..8cf2af420 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/gin/NotificationGinModule.java @@ -0,0 +1,24 @@ +package org.iplantc.de.notifications.client.gin; + +import org.iplantc.de.notifications.client.gin.factory.NotificationViewFactory; +import org.iplantc.de.notifications.client.presenter.NotificationPresenterImpl; +import org.iplantc.de.notifications.client.views.NotificationToolbarView; +import org.iplantc.de.notifications.client.views.NotificationToolbarViewImpl; +import org.iplantc.de.notifications.client.views.NotificationView; +import org.iplantc.de.notifications.client.views.NotificationViewImpl; + +import com.google.gwt.inject.client.AbstractGinModule; 
+import com.google.gwt.inject.client.assistedinject.GinFactoryModuleBuilder; + +/** + * @author aramsey + */ +public class NotificationGinModule extends AbstractGinModule { + @Override + protected void configure() { + install(new GinFactoryModuleBuilder().implement(NotificationView.class, NotificationViewImpl.class).build( + NotificationViewFactory.class)); + bind(NotificationToolbarView.class).to(NotificationToolbarViewImpl.class); + bind(NotificationView.Presenter.class).to(NotificationPresenterImpl.class); + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/gin/factory/NotificationViewFactory.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/gin/factory/NotificationViewFactory.java new file mode 100644 index 000000000..6313d5000 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/gin/factory/NotificationViewFactory.java @@ -0,0 +1,14 @@ +package org.iplantc.de.notifications.client.gin.factory; + +import org.iplantc.de.client.models.notifications.NotificationMessage; +import org.iplantc.de.notifications.client.views.NotificationView; + +import com.sencha.gxt.data.shared.ListStore; + +/** + * @author aramsey + */ +public interface NotificationViewFactory { + + NotificationView create(ListStore listStore); +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java index 17b12ca35..f7b6f975d 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java @@ -109,15 +109,15 @@ public void onSuccess(String result1) { }); } } - EventBus eventBus; - MessageServiceFacade messageServiceFacade; private final ListStore listStore; private final NotificationToolbarView toolbar; private final NotificationView view; private PagingLoadResult callbackResult; private NotificationView.NotificationViewAppearance appearance; NotificationCategory currentCategory; - private final JsonUtil jsonUtil; + @Inject EventBus eventBus; + @Inject MessageServiceFacade messageServiceFacade; + @Inject JsonUtil jsonUtil; @Inject public NotificationPresenterImpl(final NotificationViewFactory viewFactory, @@ -128,7 +128,6 @@ public NotificationPresenterImpl(final NotificationViewFactory viewFactory, this.listStore = createListStore(messageProperties); this.view = viewFactory.create(listStore); currentCategory = NotificationCategory.ALL; - this.jsonUtil = JsonUtil.getInstance(); toolbar.setPresenter(this); view.setNorthWidget(toolbar); this.view.setPresenter(this); diff --git a/ui/de-webapp/src/main/java/org/iplantc/de/client/gin/DEInjector.java b/ui/de-webapp/src/main/java/org/iplantc/de/client/gin/DEInjector.java index 6e054488d..519d00fb6 100644 --- a/ui/de-webapp/src/main/java/org/iplantc/de/client/gin/DEInjector.java +++ b/ui/de-webapp/src/main/java/org/iplantc/de/client/gin/DEInjector.java @@ -9,6 +9,7 @@ import org.iplantc.de.desktop.client.gin.DEGinModule; import org.iplantc.de.diskResource.client.gin.DiskResourceGinModule; import org.iplantc.de.fileViewers.client.gin.FileViewerGinModule; +import org.iplantc.de.notifications.client.gin.NotificationGinModule; import org.iplantc.de.tags.client.gin.TagsGinModule; import org.iplantc.de.tools.requests.client.gin.ToolRequestGinModule; @@ -29,7 +30,8 @@ DiskResourceGinModule.class, 
CommentsGinModule.class, TagsGinModule.class, - FileViewerGinModule.class}) + FileViewerGinModule.class, + NotificationGinModule.class}) public interface DEInjector extends Ginjector { public static final DEInjector INSTANCE = GWT.create(DEInjector.class); From ecc6527f1531e0b63d42c3acf0e828b180ad4597 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 22 Feb 2016 12:25:34 -0700 Subject: [PATCH 052/183] CORE-6341 Remove Notification presenter references and fire events --- .../events/NotificationGridRefreshEvent.java | 29 +++++++ .../events/NotificationSelectionEvent.java | 43 +++++++++++ ...ificationToolbarDeleteAllClickedEvent.java | 30 ++++++++ ...NotificationToolbarDeleteClickedEvent.java | 30 ++++++++ .../NotificationToolbarSelectionEvent.java | 43 +++++++++++ .../presenter/NotificationPresenterImpl.java | 77 +++++++++++-------- .../client/views/NotificationToolbarView.java | 20 ++--- .../views/NotificationToolbarViewImpl.java | 36 ++++++--- .../client/views/NotificationView.java | 15 ++-- .../client/views/NotificationViewImpl.java | 40 ++++------ 10 files changed, 274 insertions(+), 89 deletions(-) create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationGridRefreshEvent.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationSelectionEvent.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarDeleteAllClickedEvent.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarDeleteClickedEvent.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarSelectionEvent.java diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationGridRefreshEvent.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationGridRefreshEvent.java new file mode 100644 index 000000000..337e8560e --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationGridRefreshEvent.java @@ -0,0 +1,29 @@ +package org.iplantc.de.notifications.client.events; + +import com.google.gwt.event.shared.EventHandler; +import com.google.gwt.event.shared.GwtEvent; +import com.google.gwt.event.shared.HandlerRegistration; + +/** + * @author aramsey + */ +public class NotificationGridRefreshEvent extends GwtEvent { + public static Type TYPE = + new Type(); + + public Type getAssociatedType() { + return TYPE; + } + + protected void dispatch(NotificationGridRefreshEventHandler handler) { + handler.onNotificationGridRefresh(this); + } + + public interface NotificationGridRefreshEventHandler extends EventHandler { + void onNotificationGridRefresh(NotificationGridRefreshEvent event); + } + + public interface HasNotificationGridRefreshEventHandlers { + HandlerRegistration addNotificationGridRefreshEventHandler(NotificationGridRefreshEventHandler handler); + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationSelectionEvent.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationSelectionEvent.java new file mode 100644 index 000000000..b39407c19 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationSelectionEvent.java @@ -0,0 +1,43 @@ +package org.iplantc.de.notifications.client.events; + +import org.iplantc.de.client.models.notifications.NotificationMessage; + +import 
com.google.gwt.event.shared.EventHandler; +import com.google.gwt.event.shared.GwtEvent; +import com.google.gwt.event.shared.HandlerRegistration; + +import java.util.List; + +/** + * @author aramsey + */ +public class NotificationSelectionEvent extends GwtEvent { + + private List items; + + public static Type TYPE = + new Type(); + + public NotificationSelectionEvent(List items) { + this.items = items; + } + public Type getAssociatedType() { + return TYPE; + } + + protected void dispatch(NotificationSelectionEventHandler handler) { + handler.onNotificationSelection(this); + } + + public interface NotificationSelectionEventHandler extends EventHandler { + void onNotificationSelection(NotificationSelectionEvent event); + } + + public List getNotifications() { + return items; + } + + public interface HasNotificationSelectionEventHandlers { + HandlerRegistration addNotificationSelectionEventHandler (NotificationSelectionEventHandler handler); + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarDeleteAllClickedEvent.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarDeleteAllClickedEvent.java new file mode 100644 index 000000000..8ca0aac90 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarDeleteAllClickedEvent.java @@ -0,0 +1,30 @@ +package org.iplantc.de.notifications.client.events; + +import com.google.gwt.event.shared.EventHandler; +import com.google.gwt.event.shared.GwtEvent; +import com.google.gwt.event.shared.HandlerRegistration; + +/** + * @author aramsey + */ +public class NotificationToolbarDeleteAllClickedEvent + extends GwtEvent { + public static Type TYPE = + new Type(); + + public Type getAssociatedType() { + return TYPE; + } + + protected void dispatch(NotificationToolbarDeleteAllClickedEventHandler handler) { + handler.onNotificationToolbarDeleteAllClicked(this); + } + + public static interface NotificationToolbarDeleteAllClickedEventHandler extends EventHandler { + void onNotificationToolbarDeleteAllClicked(NotificationToolbarDeleteAllClickedEvent event); + } + + public interface HasNotificationToolbarDeleteAllClickedEventHandlers { + HandlerRegistration addNotificationToolbarDeleteAllClickedEventHandler(NotificationToolbarDeleteAllClickedEventHandler handler); + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarDeleteClickedEvent.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarDeleteClickedEvent.java new file mode 100644 index 000000000..b3f944c43 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarDeleteClickedEvent.java @@ -0,0 +1,30 @@ +package org.iplantc.de.notifications.client.events; + +import com.google.gwt.event.shared.EventHandler; +import com.google.gwt.event.shared.GwtEvent; +import com.google.gwt.event.shared.HandlerRegistration; + +/** + * @author aramsey + */ +public class NotificationToolbarDeleteClickedEvent + extends GwtEvent { + public static Type TYPE = + new Type(); + + public Type getAssociatedType() { + return TYPE; + } + + protected void dispatch(NotificationToolbarDeleteClickedEventHandler handler) { + handler.onNotificationToolbarDeleteClicked(this); + } + + public static interface NotificationToolbarDeleteClickedEventHandler extends EventHandler { + void onNotificationToolbarDeleteClicked(NotificationToolbarDeleteClickedEvent event); + } + + public 
interface HasNotificationToolbarDeleteClickedEventHandlers { + HandlerRegistration addNotificationToolbarDeleteClickedEventHandler(NotificationToolbarDeleteClickedEventHandler handler); + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarSelectionEvent.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarSelectionEvent.java new file mode 100644 index 000000000..bbff690e3 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/events/NotificationToolbarSelectionEvent.java @@ -0,0 +1,43 @@ +package org.iplantc.de.notifications.client.events; + +import org.iplantc.de.client.models.notifications.NotificationCategory; + +import com.google.gwt.event.shared.EventHandler; +import com.google.gwt.event.shared.GwtEvent; +import com.google.gwt.event.shared.HandlerRegistration; + +/** + * @author aramsey + */ +public class NotificationToolbarSelectionEvent + extends GwtEvent { + + private NotificationCategory notificationCategory; + + public NotificationToolbarSelectionEvent (NotificationCategory notificationCategory) { + this.notificationCategory = notificationCategory; + } + + public NotificationCategory getNotificationCategory() { + return notificationCategory; + } + + public static Type TYPE = + new Type(); + + public Type getAssociatedType() { + return TYPE; + } + + protected void dispatch(NotificationToolbarSelectionEventHandler handler) { + handler.onNotificationToolbarSelection(this); + } + + public static interface NotificationToolbarSelectionEventHandler extends EventHandler { + void onNotificationToolbarSelection(NotificationToolbarSelectionEvent event); + } + + public interface HasNotificationToolbarSelectionEventHandlers { + HandlerRegistration addNotificationToolbarSelectionEventHandler(NotificationToolbarSelectionEventHandler handler); + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java index f7b6f975d..07b30ce3d 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImpl.java @@ -11,6 +11,11 @@ import org.iplantc.de.commons.client.ErrorHandler; import org.iplantc.de.notifications.client.events.DeleteNotificationsUpdateEvent; import org.iplantc.de.notifications.client.events.NotificationCountUpdateEvent; +import org.iplantc.de.notifications.client.events.NotificationGridRefreshEvent; +import org.iplantc.de.notifications.client.events.NotificationSelectionEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarDeleteAllClickedEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarDeleteClickedEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarSelectionEvent; import org.iplantc.de.notifications.client.gin.factory.NotificationViewFactory; import org.iplantc.de.notifications.client.model.NotificationMessageProperties; import org.iplantc.de.notifications.client.views.NotificationToolbarView; @@ -52,7 +57,12 @@ * * @author sriram */ -public class NotificationPresenterImpl implements NotificationView.Presenter, NotificationToolbarView.Presenter { +public class NotificationPresenterImpl implements NotificationView.Presenter, + 
NotificationGridRefreshEvent.NotificationGridRefreshEventHandler, + NotificationSelectionEvent.NotificationSelectionEventHandler, + NotificationToolbarSelectionEvent.NotificationToolbarSelectionEventHandler, + NotificationToolbarDeleteClickedEvent.NotificationToolbarDeleteClickedEventHandler, + NotificationToolbarDeleteAllClickedEvent.NotificationToolbarDeleteAllClickedEventHandler { private final class NotificationServiceCallback extends NotificationCallback { private final AsyncCallback> callback; @@ -84,8 +94,8 @@ public void onSuccess(String result) { messages.add(n.getMessage()); } - callbackResult = new PagingLoadResultBean<>(messages, total, - loadConfig.getOffset()); + PagingLoadResult callbackResult = + new PagingLoadResultBean<>(messages, total, loadConfig.getOffset()); callback.onSuccess(callbackResult); List hasIds = Lists.newArrayList(); @@ -112,7 +122,6 @@ public void onSuccess(String result1) { private final ListStore listStore; private final NotificationToolbarView toolbar; private final NotificationView view; - private PagingLoadResult callbackResult; private NotificationView.NotificationViewAppearance appearance; NotificationCategory currentCategory; @Inject EventBus eventBus; @@ -128,16 +137,32 @@ public NotificationPresenterImpl(final NotificationViewFactory viewFactory, this.listStore = createListStore(messageProperties); this.view = viewFactory.create(listStore); currentCategory = NotificationCategory.ALL; - toolbar.setPresenter(this); + this.toolbar = toolbar; view.setNorthWidget(toolbar); - this.view.setPresenter(this); + setRefreshButton(view.getRefreshButton()); - // set default cat + addEventHandlers(); + } + + private void addEventHandlers() { + view.addNotificationGridRefreshEventHandler(this); + view.addNotificationSelectionEventHandler(this); + toolbar.addNotificationToolbarDeleteAllClickedEventHandler(this); + toolbar.addNotificationToolbarDeleteClickedEventHandler(this); + toolbar.addNotificationToolbarSelectionEventHandler(this); + } ListStore createListStore(NotificationMessageProperties messageProperties) { return new ListStore<>(messageProperties.id()); } + @Override + public void onNotificationGridRefresh(NotificationGridRefreshEvent event) { + if (listStore.size() > 0) { + toolbar.setDeleteAllButtonEnabled(true); + } else { + toolbar.setDeleteAllButtonEnabled(false); + } } @Override @@ -181,16 +206,11 @@ public void filterBy(NotificationCategory category) { } @Override - public NotificationCategory getCurrentCategory() { - return currentCategory; - } - - @Override - public void onGridRefresh() { - if (view.getListStore().size() > 0) { - toolbar.setDeleteAllButtonEnabled(true); + public void onNotificationSelection(NotificationSelectionEvent event) { + if (event.getNotifications() == null || event.getNotifications().size() == 0) { + toolbar.setDeleteButtonEnabled(false); } else { - toolbar.setDeleteAllButtonEnabled(false); + toolbar.setDeleteButtonEnabled(true); } } @@ -200,8 +220,9 @@ public void go(HasOneWidget container) { view.setLoader(initProxyLoader()); } + @Override - public void onDeleteAllClicked() { + public void onNotificationToolbarDeleteAllClicked(NotificationToolbarDeleteAllClickedEvent event) { view.mask(); messageServiceFacade.deleteAll(currentCategory, new AsyncCallback() { @@ -219,11 +240,10 @@ public void onSuccess(String result) { eventBus.fireEvent(event); } }); - } @Override - public void onDeleteClicked() { + public void onNotificationToolbarDeleteClicked(NotificationToolbarDeleteClickedEvent event) { final List notifications = 
view.getSelectedItems(); final Command callback = new Command() { @Override @@ -255,21 +275,11 @@ public void onSuccess(String result) { } }); } - } @Override - public void onFilterSelection(NotificationCategory cat) { - filterBy(cat); - } - - @Override - public void onNotificationSelection(List items) { - if (items == null || items.size() == 0) { - toolbar.setDeleteButtonEnabled(false); - } else { - toolbar.setDeleteButtonEnabled(true); - } + public void onNotificationToolbarSelection(NotificationToolbarSelectionEvent event) { + filterBy(event.getNotificationCategory()); } @Override @@ -279,6 +289,11 @@ public void setRefreshButton(TextButton refreshBtn) { } } + @Override + public NotificationCategory getCurrentCategory() { + return currentCategory; + } + private PagingLoader> initProxyLoader() { RpcProxy> proxy = new RpcProxy>() { diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarView.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarView.java index 09f3a2c07..ce9c05764 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarView.java @@ -1,6 +1,9 @@ package org.iplantc.de.notifications.client.views; import org.iplantc.de.client.models.notifications.NotificationCategory; +import org.iplantc.de.notifications.client.events.NotificationToolbarDeleteAllClickedEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarDeleteClickedEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarSelectionEvent; import com.google.gwt.user.client.ui.IsWidget; @@ -12,24 +15,15 @@ * @author sriram * */ -public interface NotificationToolbarView extends IsWidget { - - public interface Presenter { - - void onFilterSelection(NotificationCategory cat); - - void onDeleteClicked(); - - void onDeleteAllClicked(); - - } +public interface NotificationToolbarView extends IsWidget, + NotificationToolbarDeleteClickedEvent.HasNotificationToolbarDeleteClickedEventHandlers, + NotificationToolbarDeleteAllClickedEvent.HasNotificationToolbarDeleteAllClickedEventHandlers, + NotificationToolbarSelectionEvent.HasNotificationToolbarSelectionEventHandlers { void setDeleteButtonEnabled(boolean enabled); void setDeleteAllButtonEnabled(boolean enabled); - void setPresenter(Presenter p); - void setRefreshButton(TextButton refreshBtn); void setCurrentCategory(NotificationCategory category); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarViewImpl.java index 8b9697286..2edf32b58 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationToolbarViewImpl.java @@ -4,10 +4,14 @@ package org.iplantc.de.notifications.client.views; import org.iplantc.de.client.models.notifications.NotificationCategory; +import org.iplantc.de.notifications.client.events.NotificationToolbarDeleteAllClickedEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarDeleteClickedEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarSelectionEvent; import com.google.gwt.core.client.GWT; import com.google.gwt.event.logical.shared.SelectionEvent; import 
com.google.gwt.event.logical.shared.SelectionHandler; +import com.google.gwt.event.shared.HandlerRegistration; import com.google.gwt.uibinder.client.UiBinder; import com.google.gwt.uibinder.client.UiField; import com.google.gwt.uibinder.client.UiHandler; @@ -34,8 +38,6 @@ public class NotificationToolbarViewImpl extends Composite implements Notificati interface NotificationToolbarUiBinder extends UiBinder { } - private Presenter presenter; - @UiField TextButton btnDelete; @@ -58,6 +60,24 @@ public NotificationToolbarViewImpl(NotificationView.NotificationViewAppearance a initFilters(); } + @Override + public HandlerRegistration addNotificationToolbarDeleteAllClickedEventHandler( + NotificationToolbarDeleteAllClickedEvent.NotificationToolbarDeleteAllClickedEventHandler handler) { + return addHandler(handler, NotificationToolbarDeleteAllClickedEvent.TYPE); + } + + @Override + public HandlerRegistration addNotificationToolbarDeleteClickedEventHandler( + NotificationToolbarDeleteClickedEvent.NotificationToolbarDeleteClickedEventHandler handler) { + return addHandler(handler, NotificationToolbarDeleteClickedEvent.TYPE); + } + + @Override + public HandlerRegistration addNotificationToolbarSelectionEventHandler( + NotificationToolbarSelectionEvent.NotificationToolbarSelectionEventHandler handler) { + return addHandler(handler, NotificationToolbarSelectionEvent.TYPE); + } + private void initFilters() { cboFilter.add(NotificationCategory.NEW); cboFilter.add(NotificationCategory.ALL); @@ -71,7 +91,7 @@ private void initFilters() { cboFilter.addSelectionHandler(new SelectionHandler() { @Override public void onSelection(SelectionEvent event) { - presenter.onFilterSelection(event.getSelectedItem()); + fireEvent(new NotificationToolbarSelectionEvent(event.getSelectedItem())); } }); cboFilter.setEditable(false); @@ -95,18 +115,12 @@ public void setDeleteAllButtonEnabled(boolean enabled) { @UiHandler("btnDelete") public void deleteClicked(SelectEvent event) { - presenter.onDeleteClicked(); + fireEvent(new NotificationToolbarDeleteClickedEvent()); } @UiHandler("btnDeleteAll") public void deleteAllClicked(SelectEvent event) { - presenter.onDeleteAllClicked(); - } - - @Override - public void setPresenter(Presenter p) { - this.presenter = p; - + fireEvent(new NotificationToolbarDeleteAllClickedEvent()); } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.java index 76d5cb157..8947237c4 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationView.java @@ -2,6 +2,8 @@ import org.iplantc.de.client.models.notifications.NotificationCategory; import org.iplantc.de.client.models.notifications.NotificationMessage; +import org.iplantc.de.notifications.client.events.NotificationGridRefreshEvent; +import org.iplantc.de.notifications.client.events.NotificationSelectionEvent; import com.google.gwt.user.client.ui.IsWidget; @@ -12,6 +14,9 @@ import java.util.List; +public interface NotificationView extends IsWidget, + NotificationGridRefreshEvent.HasNotificationGridRefreshEventHandlers, + NotificationSelectionEvent.HasNotificationSelectionEventHandlers { interface NotificationViewAppearance { String notifications(); @@ -49,17 +54,9 @@ public interface Presenter extends org.iplantc.de.commons.client.presenter.Prese */ public FilterPagingLoadConfig 
buildDefaultLoadConfig(); - /** - * - * - */ - public void onNotificationSelection(List items); - void setRefreshButton(TextButton refreshBtn); NotificationCategory getCurrentCategory(); - - void onGridRefresh(); } /** @@ -76,8 +73,6 @@ public interface Presenter extends org.iplantc.de.commons.client.presenter.Prese */ public List getSelectedItems(); - public void setPresenter(final Presenter presenter); - /** * loads notifications using given laod conig * diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java index 1d8897b2b..c518a465f 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/client/views/NotificationViewImpl.java @@ -6,12 +6,15 @@ import org.iplantc.de.client.models.notifications.NotificationCategory; import org.iplantc.de.client.models.notifications.NotificationMessage; import org.iplantc.de.commons.client.widgets.DEPagingToolbar; +import org.iplantc.de.notifications.client.events.NotificationGridRefreshEvent; +import org.iplantc.de.notifications.client.events.NotificationSelectionEvent; import org.iplantc.de.notifications.client.model.NotificationMessageProperties; import org.iplantc.de.notifications.client.views.cells.NotificationMessageCell; import org.iplantc.de.resources.client.messages.I18N; import com.google.gwt.cell.client.DateCell; import com.google.gwt.core.client.GWT; +import com.google.gwt.event.shared.HandlerRegistration; import com.google.gwt.i18n.client.DateTimeFormat; import com.google.gwt.uibinder.client.UiBinder; import com.google.gwt.uibinder.client.UiFactory; @@ -82,11 +85,22 @@ public NotificationViewImpl(@Assisted ListStore listStore, addGridRefreshHandler(); } + + @Override + public HandlerRegistration addNotificationGridRefreshEventHandler(NotificationGridRefreshEvent.NotificationGridRefreshEventHandler handler) { + return addHandler(handler, NotificationGridRefreshEvent.TYPE); + } + + @Override + public HandlerRegistration addNotificationSelectionEventHandler(NotificationSelectionEvent.NotificationSelectionEventHandler handler) { + return addHandler(handler, NotificationSelectionEvent.TYPE); + } + private void addGridRefreshHandler() { grid.addRefreshHandler(new RefreshEvent.RefreshHandler() { @Override public void onRefresh(RefreshEvent event) { - presenter.onGridRefresh(); + fireEvent(new NotificationGridRefreshEvent()); } }); } @@ -97,7 +111,7 @@ private void addGridSelectionHandler() { @Override public void onSelectionChanged(SelectionChangedEvent event) { - presenter.onNotificationSelection(event.getSelection()); + fireEvent(new NotificationSelectionEvent(event.getSelection())); } }); } @@ -122,28 +136,6 @@ public List getSelectedItems() { return grid.getSelectionModel().getSelectedItems(); } - /* - * (non-Javadoc) - * - * @see - * org.iplantc.de.client.gxt3.views.NotificationView#setPresenter(org.iplantc.de.client.gxt3.views - * .NotificationView.Presenter) - */ - @Override - public void setPresenter(Presenter presenter) { - this.presenter = presenter; - } - - /* - * (non-Javadoc) - * - * @see org.iplantc.de.client.gxt3.views.NotificationView#getListStore() - */ - @Override - public ListStore getListStore() { - return listStore; - } - @SuppressWarnings("unchecked") @Override public void loadNotifications(FilterPagingLoadConfig config) { From 615028ed491f5f47411333713772f6344a07a848 Mon Sep 17 
00:00:00 2001 From: Ashley Ramsey Date: Mon, 22 Feb 2016 11:20:16 -0700 Subject: [PATCH 053/183] CORE-6341 Refactor Notifications tests --- .../NotificationPresenterImplTest.java | 50 +++++++++++++++---- 1 file changed, 39 insertions(+), 11 deletions(-) diff --git a/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java index 9afc5ee34..de1513ec8 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/notifications/client/presenter/NotificationPresenterImplTest.java @@ -11,6 +11,12 @@ import org.iplantc.de.client.models.notifications.NotificationMessage; import org.iplantc.de.client.services.MessageServiceFacade; import org.iplantc.de.notifications.client.events.DeleteNotificationsUpdateEvent; +import org.iplantc.de.notifications.client.events.NotificationGridRefreshEvent; +import org.iplantc.de.notifications.client.events.NotificationSelectionEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarDeleteAllClickedEvent; +import org.iplantc.de.notifications.client.events.NotificationToolbarDeleteClickedEvent; +import org.iplantc.de.notifications.client.gin.factory.NotificationViewFactory; +import org.iplantc.de.notifications.client.model.NotificationMessageProperties; import org.iplantc.de.notifications.client.views.NotificationToolbarView; import org.iplantc.de.notifications.client.views.NotificationView; @@ -37,7 +43,10 @@ @RunWith(GxtMockitoTestRunner.class) public class NotificationPresenterImplTest { + @Mock NotificationViewFactory viewFactoryMock; + @Mock NotificationView.NotificationViewAppearance appearanceMock; @Mock NotificationView viewMock; + @Mock NotificationMessageProperties messagePropertiesMock; @Mock MessageServiceFacade messageServiceFacadeMock; @Mock NotificationToolbarView toolbarViewMock; @Mock EventBus eventBusMock; @@ -53,59 +62,75 @@ public class NotificationPresenterImplTest { @Before public void setUp() { + when(viewFactoryMock.create(listStoreMock)).thenReturn(viewMock); when(currentCategoryMock.toString()).thenReturn("sample"); when(viewMock.getCurrentLoadConfig()).thenReturn(mock(FilterPagingLoadConfig.class)); when(notificationMessageMock.getId()).thenReturn("id"); - uut = new NotificationPresenterImpl(viewMock); - + uut = new NotificationPresenterImpl(viewFactoryMock, + appearanceMock, + toolbarViewMock, + messagePropertiesMock) { + @Override + ListStore createListStore(NotificationMessageProperties messageProperties) { + return listStoreMock; + } + }; uut.currentCategory = currentCategoryMock; uut.messageServiceFacade = messageServiceFacadeMock; - uut.toolbar = toolbarViewMock; uut.eventBus = eventBusMock; } @Test public void testOnNotificationGridRefresh_emptyListStore() { + NotificationGridRefreshEvent eventMock = mock(NotificationGridRefreshEvent.class); when(listStoreMock.size()).thenReturn(0); - when(viewMock.getListStore()).thenReturn(listStoreMock); - uut.onGridRefresh(); + uut.onNotificationGridRefresh(eventMock); verify(toolbarViewMock).setDeleteAllButtonEnabled(eq(false)); } @Test public void testOnNotificationGridRefresh_nonEmptyListStore() { + NotificationGridRefreshEvent eventMock = mock(NotificationGridRefreshEvent.class); when(listStoreMock.size()).thenReturn(5); - when(viewMock.getListStore()).thenReturn(listStoreMock); - uut.onGridRefresh(); + 
uut.onNotificationGridRefresh(eventMock); + verify(toolbarViewMock).setDeleteAllButtonEnabled(eq(true)); } @Test public void testOnNotificationSelection_emptyListStore() { + NotificationSelectionEvent eventMock = mock(NotificationSelectionEvent.class); + when(eventMock.getNotifications()).thenReturn(listMock); when(listMock.size()).thenReturn(0); - uut.onNotificationSelection(listMock); + uut.onNotificationSelection(eventMock); + verify(toolbarViewMock).setDeleteButtonEnabled(eq(false)); } @Test public void testOnNotificationSelection_nonEmptyListStore() { + NotificationSelectionEvent eventMock = mock(NotificationSelectionEvent.class); + when(eventMock.getNotifications()).thenReturn(listMock); when(listMock.size()).thenReturn(5); - uut.onNotificationSelection(listMock); + uut.onNotificationSelection(eventMock); + verify(toolbarViewMock).setDeleteButtonEnabled(eq(true)); } @Test public void testOnNotificationToolbarDeleteAllClicked() { - uut.onDeleteAllClicked(); + NotificationToolbarDeleteAllClickedEvent eventMock = mock(NotificationToolbarDeleteAllClickedEvent.class); + uut.onNotificationToolbarDeleteAllClicked(eventMock); verify(viewMock).mask(); verify(messageServiceFacadeMock).deleteAll(eq(currentCategoryMock), asyncCallbackStringCaptor.capture()); + AsyncCallback asyncCallback = asyncCallbackStringCaptor.getValue(); asyncCallback.onSuccess("result"); @@ -117,6 +142,8 @@ public void testOnNotificationToolbarDeleteAllClicked() { @Test public void testOnNotificationToolbarDeleteClicked() { + NotificationToolbarDeleteClickedEvent eventMock = mock(NotificationToolbarDeleteClickedEvent.class); + when(listMock.isEmpty()).thenReturn(false); when(listMock.size()).thenReturn(1); when(iteratorMock.hasNext()).thenReturn(true, false); @@ -124,7 +151,8 @@ public void testOnNotificationToolbarDeleteClicked() { when(listMock.iterator()).thenReturn(iteratorMock); when(viewMock.getSelectedItems()).thenReturn(listMock); - uut.onDeleteClicked(); + uut.onNotificationToolbarDeleteClicked(eventMock); + verify(messageServiceFacadeMock).deleteMessages(isA(JSONObject.class), asyncCallbackStringCaptor.capture()); From 52dd3530ba938bb713952ae93ab420f6d8b102a2 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 22 Feb 2016 13:22:58 -0700 Subject: [PATCH 054/183] CORE-6341 Add commons to Notifications GWT xml --- .../java/org/iplantc/de/notifications/Notifications.gwt.xml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/notifications/Notifications.gwt.xml b/ui/de-lib/src/main/java/org/iplantc/de/notifications/Notifications.gwt.xml index 8b7195cab..72ee660a0 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/notifications/Notifications.gwt.xml +++ b/ui/de-lib/src/main/java/org/iplantc/de/notifications/Notifications.gwt.xml @@ -3,5 +3,7 @@ + + - \ No newline at end of file + From 400b5fc10cc53c665e384c8300842fc2470c2259 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Wed, 24 Feb 2016 10:38:23 -0700 Subject: [PATCH 055/183] CORE-7445 display error when file upload fails when uploading a file > 2G. Note: No specific error msg is displayed cos UI does not receive one. 
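A note on the pattern in this patch: for very large uploads the browser gets back an empty response body, so the parsed Splittable can be null before the existing "file"-key check ever runs; the fix tests for that first. A minimal, hedged sketch of the resulting guard order in plain Java (the class name and message strings are illustrative, not DE API):

    // Hedged sketch of the guard order introduced by this patch; names and
    // messages are illustrative only.
    public final class UploadResponseGuard {

        /** Returns an error description, or null when the response looks valid. */
        static String check(String parsedBody) {
            if (parsedBody == null) {
                // New first guard: nothing parseable came back (e.g. a > 2G upload).
                return "upload failed: empty response";
            }
            if (!parsedBody.contains("\"file\"")) {
                // Pre-existing guard: parsed, but no "file" entry in the reply.
                return "upload failed: no 'file' entry in response";
            }
            return null; // success path: the real code fires FileUploadedEvent here
        }

        public static void main(String[] args) {
            System.out.println(check(null));             // empty-response branch
            System.out.println(check("{}"));             // missing-'file' branch
            System.out.println(check("{\"file\":{}}"));  // success -> prints null
        }
    }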
--- .../views/dialogs/SimpleFileUploadDialog.java | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java index 5ca7c090c..85a4ac9bf 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java @@ -10,8 +10,10 @@ import org.iplantc.de.commons.client.info.IplantAnnouncer; import org.iplantc.de.commons.client.validators.DiskResourceNameValidator; import org.iplantc.de.commons.client.views.dialogs.IPlantDialog; +import org.iplantc.de.commons.client.views.dialogs.IplantErrorDialog; import org.iplantc.de.commons.client.widgets.IPCFileUploadField; import org.iplantc.de.diskResource.client.events.FileUploadedEvent; +import org.iplantc.de.resources.client.messages.I18N; import com.google.common.base.Strings; import com.google.common.collect.Lists; @@ -253,12 +255,19 @@ void onSubmitComplete(SubmitCompleteEvent event) { String results = Format.stripTags(results2); Splittable split = StringQuoter.split(results); IPCFileUploadField field = fufList.get(formList.indexOf(event.getSource())); - if (split.isUndefined("file") || (split.get("file") == null)) { - field.markInvalid(appearance.fileUploadsFailed(Lists.newArrayList(field.getValue()))); - IplantAnnouncer.getInstance().schedule( - new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList(field.getValue())))); + if (split == null) { + IplantAnnouncer.getInstance() + .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList( + field.getValue())))); } else { - eventBus.fireEvent(new FileUploadedEvent(uploadDest, field.getValue(), results)); + if (split.isUndefined("file") || (split.get("file") == null)) { + field.markInvalid(appearance.fileUploadsFailed(Lists.newArrayList(field.getValue()))); + IplantAnnouncer.getInstance() + .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList( + field.getValue())))); + } else { + eventBus.fireEvent(new FileUploadedEvent(uploadDest, field.getValue(), results)); + } } if (submittedForms.size() == 0) { From 7779ffe2994e8dad1a28a1402e74be1eea555257 Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Wed, 24 Feb 2016 14:06:12 -0700 Subject: [PATCH 056/183] CORE-7374 Add terrain.clients.ezid-test. 
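For readers skipping past the Clojure below: these tests pin down the ANVL escaping that EZID requires, a four-character mapping ('%' <-> %25, newline <-> %0A, carriage return <-> %0D, ':' <-> %3A) in which replacement order matters. A hedged Java transliteration of exactly what the tests assert (the real implementation is the private anvl-escape/anvl-unescape pair in terrain.clients.ezid):

    public final class AnvlSketch {

        static String escape(String s) {
            // '%' must be handled first so later replacements are not double-escaped.
            return s.replace("%", "%25").replace("\n", "%0A")
                    .replace("\r", "%0D").replace(":", "%3A");
        }

        static String unescape(String s) {
            // Mirror image: decode "%25" last so it cannot spawn new escape sequences.
            return s.replace("%3A", ":").replace("%0D", "\r")
                    .replace("%0A", "\n").replace("%25", "%");
        }

        public static void main(String[] args) {
            String raw = "%test: This \r\n is a %25 Test\n";
            String esc = "%25test%3A This %0D%0A is a %2525 Test%0A";
            System.out.println(escape(raw).equals(esc));   // true
            System.out.println(unescape(esc).equals(raw)); // true
        }
    }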
--- services/terrain/src/terrain/clients/ezid.clj | 2 +- .../test/terrain/clients/ezid_test.clj | 105 ++++++++++++++++++ 2 files changed, 106 insertions(+), 1 deletion(-) create mode 100644 services/terrain/test/terrain/clients/ezid_test.clj diff --git a/services/terrain/src/terrain/clients/ezid.clj b/services/terrain/src/terrain/clients/ezid.clj index f79a03acb..50045cf69 100644 --- a/services/terrain/src/terrain/clients/ezid.clj +++ b/services/terrain/src/terrain/clients/ezid.clj @@ -1,4 +1,4 @@ -(ns ^{:author psarando} terrain.clients.ezid +(ns terrain.clients.ezid (:use [ring.util.http-response :only [charset]] [slingshot.slingshot :only [try+ throw+]]) (:require [cemerick.url :as curl] diff --git a/services/terrain/test/terrain/clients/ezid_test.clj b/services/terrain/test/terrain/clients/ezid_test.clj new file mode 100644 index 000000000..216df57f2 --- /dev/null +++ b/services/terrain/test/terrain/clients/ezid_test.clj @@ -0,0 +1,105 @@ +(ns terrain.clients.ezid-test + (:use [clojure.test] + [terrain.clients.ezid])) + +;; Re-def private functions so they can be tested in this namespace. +(def anvl-escape #'terrain.clients.ezid/anvl-escape) +(def anvl-unescape #'terrain.clients.ezid/anvl-unescape) +(def anvl-decode #'terrain.clients.ezid/anvl-decode) +(def anvl-encode #'terrain.clients.ezid/anvl-encode) + +(deftest anvl-escape-test + (is (= (anvl-escape "Test % escape") "Test %25 escape") + "anvl-escape '%' character") + (is (= (anvl-escape "Test \n escape") "Test %0A escape") + "anvl-escape '\\n' character") + (is (= (anvl-escape "Test \r escape") "Test %0D escape") + "anvl-escape '\\r' character") + (is (= (anvl-escape "Test : escape") "Test %3A escape") + "anvl-escape ':' character") + (is (= (anvl-escape "% This \n is \r a : Test") "%25 This %0A is %0D a %3A Test") + "anvl-escape characters forwards") + (is (= (anvl-escape ": This \r is \n a % Test") "%3A This %0D is %0A a %25 Test") + "anvl-escape characters backwards") + (is (= (anvl-escape "%test: This \r\n is a %25 Test\n") "%25test%3A This %0D%0A is a %2525 Test%0A") + "anvl-escape mixed characters")) + +(deftest anvl-unescape-test + (is (= (anvl-unescape "Test %3A unescape") "Test : unescape") + "anvl-unescape ':' character") + (is (= (anvl-unescape "Test %0D unescape") "Test \r unescape") + "anvl-unescape '\\r' character") + (is (= (anvl-unescape "Test %0A unescape") "Test \n unescape") + "anvl-unescape '\\n' character") + (is (= (anvl-unescape "Test %25 unescape") "Test % unescape") + "anvl-unescape '%' character") + (is (= (anvl-unescape "%3A This %0D is %0A a %25 Test") ": This \r is \n a % Test") + "anvl-unescape characters forwards") + (is (= (anvl-unescape "%25 This %0A is %0D a %3A Test") "% This \n is \r a : Test") + "anvl-unescape characters backwards") + (is (= (anvl-unescape "%25test%3A This %0D%0A is a %2525 Test%0A") "%test: This \r\n is a %25 Test\n") + "anvl-unescape mixed characters")) + +(deftest anvl-decode-test + (is (= (anvl-decode "") {}) + "anvl-decode Empty ANVL string") + (is (= (anvl-decode "Not an ANVL string") "Not an ANVL string") + "anvl-decode Not an ANVL string") + (is (= (anvl-decode ["Not" "a string"]) ["Not" "a string"]) + "anvl-decode Not a string") + (is (= (anvl-decode {:not "a string"}) {:not "a string"}) + "anvl-decode Not a string") + (is (= (anvl-decode "some: simple string") {:some "simple string"}) + "anvl-decode 1 key with a simple string") + (is (= (anvl-decode "test: %3A unescape") {:test ": unescape"}) + "anvl-decode 1 key with a ':' character") + (is (= (anvl-decode "test: %0D 
unescape") {:test "\r unescape"}) + "anvl-decode 1 key with a '\\r' character") + (is (= (anvl-decode "test: %0A unescape") {:test "\n unescape"}) + "anvl-decode 1 key with a '\\n' character") + (is (= (anvl-decode "test: %25 unescape") {:test "% unescape"}) + "anvl-decode 1 key with a '%' character") + (is (= (anvl-decode + "test1: %25 unescape\ntest2: %0A unescape\ntest3: %0D unescape\ntest4: %3A unescape") + {:test1 "% unescape" + :test2 "\n unescape" + :test3 "\r unescape" + :test4 ": unescape"}) + "anvl-decode 4 keys, 1 escaped character each") + (is (= (anvl-decode +"test1: %3A This %0D is %0A a %25 Test +test2: %25 This %0A is %0D a %3A Test +test3: %25test%3A This %0D%0A is a %2525 Test%0A") + {:test1 ": This \r is \n a % Test" + :test2 "% This \n is \r a : Test" + :test3 "%test: This \r\n is a %25 Test\n"}) + "anvl-decode 3 keys, mixed escaped characters")) + +(deftest anvl-encode-test + (is (= (anvl-encode {}) "") + "anvl-encode Empty ANVL string") + (is (= (anvl-encode {:some "simple string"}) "some: simple string") + "anvl-encode 1 key with a simple string") + (is (= (anvl-encode {:test "% unescape"}) "test: %25 unescape") + "anvl-encode 1 key with a '%' character") + (is (= (anvl-encode {:test "\n unescape"}) "test: %0A unescape") + "anvl-encode 1 key with a '\\n' character") + (is (= (anvl-encode {:test "\r unescape"}) "test: %0D unescape") + "anvl-encode 1 key with a '\\r' character") + (is (= (anvl-encode {:test ": unescape"}) "test: %3A unescape") + "anvl-encode 1 key with a ':' character") + (is (= (anvl-encode + {:test1 "% unescape" + :test2 "\n unescape" + :test3 "\r unescape" + :test4 ": unescape"}) + "test1: %25 unescape\ntest2: %0A unescape\ntest3: %0D unescape\ntest4: %3A unescape") + "anvl-encode 4 keys, 1 escaped character each") + (is (= (anvl-encode + {:test1 ": This \r is \n a % Test" + :test2 "% This \n is \r a : Test" + :test3 "%test: This \r\n is a %25 Test\n"}) +"test1: %3A This %0D is %0A a %25 Test +test2: %25 This %0A is %0D a %3A Test +test3: %25test%3A This %0D%0A is a %2525 Test%0A") + "anvl-encode 3 keys, mixed escaped characters")) From 3449d4a634e61f21aed192527a243a54baf6fac7 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Wed, 24 Feb 2016 14:27:41 -0700 Subject: [PATCH 057/183] CORE-7519 fix error message details display. 
--- .../presenter/PermanentIdRequestPresenter.java | 5 ++--- .../java/org/iplantc/de/commons/client/ErrorHandler.java | 8 ++++---- .../presenters/toolbar/ToolbarViewPresenterImpl.java | 7 ++----- .../commons/error/ErrorHandlerDefaultAppearance.java | 4 ++-- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java index bbb31033e..eb45f00b4 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java @@ -11,6 +11,7 @@ import org.iplantc.de.client.models.identifiers.PermanentIdRequestList; import org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; import org.iplantc.de.client.services.DiskResourceServiceFacade; +import org.iplantc.de.commons.client.ErrorHandler; import org.iplantc.de.commons.client.info.ErrorAnnouncementConfig; import org.iplantc.de.commons.client.info.IplantAnnouncer; import org.iplantc.de.commons.client.info.SuccessAnnouncementConfig; @@ -175,9 +176,7 @@ public void onFailure(Throwable caught) { loadPermIdRequests(); IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.createPermIdFailure())); - IplantErrorDialog ied = - new IplantErrorDialog(I18N.DISPLAY.error(), caught.getMessage()); - ied.show(); + ErrorHandler.post(appearance.createPermIdFailure(), caught); } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/commons/client/ErrorHandler.java b/ui/de-lib/src/main/java/org/iplantc/de/commons/client/ErrorHandler.java index fd9a8dacc..baf8f97be 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/commons/client/ErrorHandler.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/commons/client/ErrorHandler.java @@ -162,11 +162,11 @@ private static String parseExceptionJson(Throwable caught) { } if (jsonError != null) { - String name = JsonUtil.getInstance().getString(jsonError, "name"); //$NON-NLS-1$ - String message = JsonUtil.getInstance().getString(jsonError, "message"); //$NON-NLS-1$ + String error_code = JsonUtil.getInstance().getString(jsonError, "error_code"); //$NON-NLS-1$ + String message = JsonUtil.getInstance().getString(jsonError, "reason"); //$NON-NLS-1$ - if (!message.isEmpty() || !name.isEmpty()) { - exceptionMessage = appearance.errorReport(name, message); + if (!message.isEmpty() || !error_code.isEmpty()) { + exceptionMessage = appearance.errorReport(error_code, message); } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/presenters/toolbar/ToolbarViewPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/presenters/toolbar/ToolbarViewPresenterImpl.java index d38c93fe8..4e5ba7199 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/presenters/toolbar/ToolbarViewPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/presenters/toolbar/ToolbarViewPresenterImpl.java @@ -21,11 +21,11 @@ import org.iplantc.de.client.services.DiskResourceServiceFacade; import org.iplantc.de.client.services.FileEditorServiceFacade; import org.iplantc.de.client.services.PermIdRequestUserServiceFacade; +import org.iplantc.de.commons.client.ErrorHandler; import 
org.iplantc.de.commons.client.info.ErrorAnnouncementConfig; import org.iplantc.de.commons.client.info.IplantAnnouncer; import org.iplantc.de.commons.client.info.SuccessAnnouncementConfig; import org.iplantc.de.commons.client.views.dialogs.IPlantDialog; -import org.iplantc.de.commons.client.views.dialogs.IplantErrorDialog; import org.iplantc.de.commons.client.views.window.configs.ConfigFactory; import org.iplantc.de.commons.client.views.window.configs.FileViewerWindowConfig; import org.iplantc.de.commons.client.views.window.configs.PathListWindowConfig; @@ -47,7 +47,6 @@ import org.iplantc.de.diskResource.client.views.dialogs.CreateNcbiSraFolderStructureDialog; import org.iplantc.de.diskResource.client.views.dialogs.GenomeSearchDialog; import org.iplantc.de.diskResource.client.views.toolbar.dialogs.TabFileConfigDialog; -import org.iplantc.de.resources.client.messages.I18N; import com.google.common.base.Preconditions; import com.google.gwt.user.client.rpc.AsyncCallback; @@ -405,9 +404,7 @@ public void onDoiRequest(String uuid) { public void onFailure(Throwable caught) { IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.doiRequestFail())); - IplantErrorDialog iplantErrorDialog = - new IplantErrorDialog(I18N.DISPLAY.error(), caught.getMessage()); - iplantErrorDialog.show(); + ErrorHandler.post(appearance.doiRequestFail(),caught); } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/commons/error/ErrorHandlerDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/commons/error/ErrorHandlerDefaultAppearance.java index 3505a8e0c..585f1ddd6 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/commons/error/ErrorHandlerDefaultAppearance.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/commons/error/ErrorHandlerDefaultAppearance.java @@ -34,8 +34,8 @@ public String error() { } @Override - public String errorReport(String name, String message) { - return errorStrings.errorReport(name, message); + public String errorReport(String err_code, String message) { + return errorStrings.errorReport(err_code, message); } @Override From 5e7290e9fc302e88150dbd4caba3590e25399593 Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Wed, 24 Feb 2016 15:08:12 -0700 Subject: [PATCH 058/183] CORE-7374 Add test2junit to terrain. 
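Context for the .gitignore entries in this patch: running the plugin (typically just "lein test2junit") writes JUnit-style XML reports into a test2junit/ directory and generates an Ant build.xml for the optional HTML conversion, which is why both paths are ignored below. (Defaults assumed from the plugin's usual behavior; the output directory is configurable via a :test2junit-output-dir entry in project.clj.)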
--- services/terrain/.gitignore | 2 ++ services/terrain/project.clj | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/services/terrain/.gitignore b/services/terrain/.gitignore index 9e4e28ebd..b60c4834d 100644 --- a/services/terrain/.gitignore +++ b/services/terrain/.gitignore @@ -26,3 +26,5 @@ .env .vagrant/ *.swp +build.xml +test2junit diff --git a/services/terrain/project.clj b/services/terrain/project.clj index 5d20fb22f..2616a37a0 100644 --- a/services/terrain/project.clj +++ b/services/terrain/project.clj @@ -43,7 +43,8 @@ [org.iplantc/heuristomancer "5.2.5.0"] [org.iplantc/service-logging "5.2.5.0"]] :plugins [[lein-ring "0.9.2" :exclusions [org.clojure/clojure]] - [swank-clojure "1.4.2" :exclusions [org.clojure/clojure]]] + [swank-clojure "1.4.2" :exclusions [org.clojure/clojure]] + [test2junit "1.1.3"]] :profiles {:dev {:resource-paths ["conf/test"]} :uberjar {:aot :all}} :main ^:skip-aot terrain.core From c8948e1f9538a1ee29ce1d90181b83894735e4f0 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 24 Feb 2016 14:14:03 -0700 Subject: [PATCH 059/183] CORE-3358: modified the POST /apps endpoint so that it verifies that another app with the same name doesn't already exist in the user's workspace --- services/apps/src/apps/persistence/app_metadata.clj | 11 +++++++++++ services/apps/src/apps/service/apps/de/edit.clj | 5 +++-- .../apps/src/apps/service/apps/de/validation.clj | 12 ++++++++++-- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/services/apps/src/apps/persistence/app_metadata.clj b/services/apps/src/apps/persistence/app_metadata.clj index f500bdf4f..ca9fe95f4 100644 --- a/services/apps/src/apps/persistence/app_metadata.clj +++ b/services/apps/src/apps/persistence/app_metadata.clj @@ -8,6 +8,7 @@ [apps.util.assertions] [apps.util.conversions :only [remove-nil-vals]]) (:require [clojure.set :as set] + [clojure.string :as string] [kameleon.app-listing :as app-listing] [korma.core :as sql] [apps.persistence.app-metadata.delete :as delete] @@ -710,3 +711,13 @@ {:u.username username}) {:aca.app_id (uuidify app-id) :l.id [not= (user-favorite-subselect :w.root_category_id faves-idx)]}))))) + +(defn list-duplicate-apps + "List apps with the same name that exist in the same category as the new app." + [app-name category-ids] + (select [:apps :a] + (fields :a.id :a.name :a.description) + (join [:app_category_app :aca] {:a.id :aca.app_id}) + (where {:aca.app_category_id [in category-ids] + (raw "trim(both from a.name)") (string/trim app-name) + :a.deleted false}))) diff --git a/services/apps/src/apps/service/apps/de/edit.clj b/services/apps/src/apps/service/apps/de/edit.clj index 460b3b533..67e3d2cf0 100644 --- a/services/apps/src/apps/service/apps/de/edit.clj +++ b/services/apps/src/apps/service/apps/de/edit.clj @@ -8,7 +8,7 @@ [kameleon.entities] [kameleon.uuids :only [uuidify]] [apps.metadata.params :only [format-reference-genome-value]] - [apps.service.apps.de.validation :only [verify-app-editable verify-app-permission]] + [apps.service.apps.de.validation :only [verify-app-editable verify-app-permission validate-app-name]] [apps.util.config :only [workspace-dev-app-category-index]] [apps.util.conversions :only [remove-nil-vals convert-rule-argument]] [apps.validation :only [validate-parameter]] @@ -384,7 +384,8 @@ (defn add-app "This service will add a single-step App, including the information at its top level." 
- [user {:keys [references groups] :as app}] + [{:keys [username] :as user} {app-name :name :keys [references groups] :as app}] + (validate-app-name app-name [(get-user-subcategory username (workspace-dev-app-category-index))]) (transaction (let [app-id (:id (persistence/add-app app)) tool-id (->> app :tools first :id) diff --git a/services/apps/src/apps/service/apps/de/validation.clj b/services/apps/src/apps/service/apps/de/validation.clj index 6a7feaf7a..bf2dd9556 100644 --- a/services/apps/src/apps/service/apps/de/validation.clj +++ b/services/apps/src/apps/service/apps/de/validation.clj @@ -1,11 +1,11 @@ (ns apps.service.apps.de.validation - (:use [clojure-commons.exception-util :only [forbidden]] + (:use [clojure-commons.exception-util :only [forbidden exists]] [slingshot.slingshot :only [try+ throw+]] [korma.core :exclude [update]] [kameleon.core] [kameleon.entities] [kameleon.queries :only [parameter-types-for-tool-type]] - [apps.persistence.app-metadata :only [get-app]]) + [apps.persistence.app-metadata :only [get-app list-duplicate-apps]]) (:require [apps.service.apps.de.permissions :as perms] [clojure.string :as string])) @@ -137,3 +137,11 @@ [user app] (verify-app-permission user app "write") (verify-app-not-public app)) + +(defn validate-app-name + "Verifies that an app with the same name doesn't already exist in any of the same app categories. The beta + category is treated as an exception because it's intended to be a staging area for new apps." + [app-name category-ids] + (when (seq (list-duplicate-apps app-name category-ids)) + (exists "An app with the same name already exists in one of the same categories." + :app_name app-name :category_ids category-ids))) From 748efbf2648feac1910d636fcfdf59fa5634ef84 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 24 Feb 2016 14:32:35 -0700 Subject: [PATCH 060/183] CORE-3358: changed the types of two configuration parameters to UUID --- services/apps/src/apps/service/apps/de/admin.clj | 5 ++--- services/apps/src/apps/service/apps/de/metadata.clj | 3 +-- services/apps/src/apps/util/config.clj | 4 ++-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/services/apps/src/apps/service/apps/de/admin.clj b/services/apps/src/apps/service/apps/de/admin.clj index 3efb4106c..f9b98027d 100644 --- a/services/apps/src/apps/service/apps/de/admin.clj +++ b/services/apps/src/apps/service/apps/de/admin.clj @@ -1,6 +1,5 @@ (ns apps.service.apps.de.admin - (:use [kameleon.uuids :only [uuidify]] - [korma.db :only [transaction]] + (:use [korma.db :only [transaction]] [apps.persistence.app-metadata.relabel :only [update-app-labels]] [apps.util.assertions :only [assert-not-nil]] [apps.util.config :only [workspace-public-id]] @@ -106,7 +105,7 @@ (validate-subcategory-name parent_id name) (validate-category-empty parent_id) (transaction - (let [category-id (:id (app-groups/create-app-group (uuidify (workspace-public-id)) category))] + (let [category-id (:id (app-groups/create-app-group (workspace-public-id) category))] (app-groups/add-subgroup parent_id category-id) category-id))) diff --git a/services/apps/src/apps/service/apps/de/metadata.clj b/services/apps/src/apps/service/apps/de/metadata.clj index 7c8b71129..8b76ea7c6 100644 --- a/services/apps/src/apps/service/apps/de/metadata.clj +++ b/services/apps/src/apps/service/apps/de/metadata.clj @@ -6,7 +6,6 @@ decategorize-app get-app-subcategory-id remove-app-from-category]] - [kameleon.uuids :only [uuidify]] [apps.service.apps.de.validation :only [app-publishable? 
verify-app-permission]] [apps.util.config :only [workspace-beta-app-category-id workspace-favorites-app-category-index]] @@ -146,7 +145,7 @@ (amp/set-app-references app-id references) (amp/set-app-suggested-categories app-id categories) (decategorize-app app-id) - (add-app-to-category app-id (uuidify (workspace-beta-app-category-id))) + (add-app-to-category app-id (workspace-beta-app-category-id)) (iplant-groups/make-app-public app-id)) nil) diff --git a/services/apps/src/apps/util/config.clj b/services/apps/src/apps/util/config.clj index 98048e0c7..d6a3c83e1 100644 --- a/services/apps/src/apps/util/config.clj +++ b/services/apps/src/apps/util/config.clj @@ -115,12 +115,12 @@ [props config-valid configs] "apps.workspace.favorites-app-category-index") -(cc/defprop-str workspace-beta-app-category-id +(cc/defprop-uuid workspace-beta-app-category-id "The UUID of the default Beta app category." [props config-valid configs] "apps.workspace.beta-app-category-id") -(cc/defprop-str workspace-public-id +(cc/defprop-uuid workspace-public-id "The UUID of the default Beta app category." [props config-valid configs] "apps.workspace.public-id") From 2bcf29df598cb741b93cfdb941dedaee072dfa71 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 24 Feb 2016 16:00:38 -0700 Subject: [PATCH 061/183] CORE-3358: updated the PATCH /apps/:app-id endpoint to check for duplicate app names --- .../src/apps/persistence/app_metadata.clj | 25 ++++++++++++++----- .../apps/src/apps/service/apps/de/edit.clj | 5 ++-- .../src/apps/service/apps/de/validation.clj | 12 ++++++--- 3 files changed, 30 insertions(+), 12 deletions(-) diff --git a/services/apps/src/apps/persistence/app_metadata.clj b/services/apps/src/apps/persistence/app_metadata.clj index ca9fe95f4..75266e00a 100644 --- a/services/apps/src/apps/persistence/app_metadata.clj +++ b/services/apps/src/apps/persistence/app_metadata.clj @@ -712,12 +712,25 @@ {:aca.app_id (uuidify app-id) :l.id [not= (user-favorite-subselect :w.root_category_id faves-idx)]}))))) -(defn list-duplicate-apps - "List apps with the same name that exist in the same category as the new app." - [app-name category-ids] +(defn- list-duplicate-apps* + [app-name category-id-set] (select [:apps :a] (fields :a.id :a.name :a.description) (join [:app_category_app :aca] {:a.id :aca.app_id}) - (where {:aca.app_category_id [in category-ids] - (raw "trim(both from a.name)") (string/trim app-name) - :a.deleted false}))) + (where {(raw "trim(both from a.name)") (string/trim app-name) + :a.deleted false + :aca.app_category_id [in category-id-set]}))) + +(defn- app-category-id-subselect + [app-id beta-app-category-id] + (subselect :app_category_app + (fields :app_category_id) + (where {:app_id app-id + :app_category_id [not= beta-app-category-id]}))) + +(defn list-duplicate-apps + "List apps with the same name that exist in the same category as the new app." 
+ ([app-name category-ids] + (list-duplicate-apps* app-name category-ids)) + ([app-name app-id beta-app-category-id] + (list-duplicate-apps* app-name (app-category-id-subselect app-id beta-app-category-id)))) diff --git a/services/apps/src/apps/service/apps/de/edit.clj b/services/apps/src/apps/service/apps/de/edit.clj index 67e3d2cf0..9eb983a3a 100644 --- a/services/apps/src/apps/service/apps/de/edit.clj +++ b/services/apps/src/apps/service/apps/de/edit.clj @@ -9,7 +9,7 @@ [kameleon.uuids :only [uuidify]] [apps.metadata.params :only [format-reference-genome-value]] [apps.service.apps.de.validation :only [verify-app-editable verify-app-permission validate-app-name]] - [apps.util.config :only [workspace-dev-app-category-index]] + [apps.util.config :only [workspace-dev-app-category-index workspace-beta-app-category-id]] [apps.util.conversions :only [remove-nil-vals convert-rule-argument]] [apps.validation :only [validate-parameter]] [apps.workspace :only [get-workspace]] @@ -459,7 +459,8 @@ (defn relabel-app "This service allows labels to be updated in any app, whether or not the app has been submitted for public use." - [user {app-id :id :as body}] + [user {app-name :name app-id :id :as body}] + (validate-app-name app-name app-id (workspace-beta-app-category-id)) (let [app (persistence/get-app app-id)] (when-not (user-owns-app? user app) (verify-app-permission user app "write"))) diff --git a/services/apps/src/apps/service/apps/de/validation.clj b/services/apps/src/apps/service/apps/de/validation.clj index bf2dd9556..6eab93868 100644 --- a/services/apps/src/apps/service/apps/de/validation.clj +++ b/services/apps/src/apps/service/apps/de/validation.clj @@ -141,7 +141,11 @@ (defn validate-app-name "Verifies that an app with the same name doesn't already exist in any of the same app categories. The beta category is treated as an exception because it's intended to be a staging area for new apps." - [app-name category-ids] - (when (seq (list-duplicate-apps app-name category-ids)) - (exists "An app with the same name already exists in one of the same categories." - :app_name app-name :category_ids category-ids))) + ([app-name category-ids] + (when (seq (list-duplicate-apps app-name category-ids)) + (exists "An app with the same name already exists in one of the selected categories." + :app_name app-name :category_ids category-ids))) + ([app-name app-id beta-category-id] + (when (seq (list-duplicate-apps app-name app-id beta-category-id)) + (exists "An app with the same name already exists in one of the same categories." + :app_name app-name :app_id app-id)))) From 7d8a58a006c86d4d4be46f471726e1156566b676 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 24 Feb 2016 16:43:00 -0700 Subject: [PATCH 062/183] CORE-3358: modified the PUT /apps/:app-id endpoint to check for duplicate app names --- services/apps/src/apps/service/apps/de/edit.clj | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/services/apps/src/apps/service/apps/de/edit.clj b/services/apps/src/apps/service/apps/de/edit.clj index 9eb983a3a..3567be3c6 100644 --- a/services/apps/src/apps/service/apps/de/edit.clj +++ b/services/apps/src/apps/service/apps/de/edit.clj @@ -347,8 +347,9 @@ (defn update-app "This service will update a single-step App, including the information at its top level and the tool used by its single task, as long as the App has not been submitted for public use." 
- [user {app-id :id :keys [references groups] :as app}] + [user {app-id :id app-name :name :keys [references groups] :as app}] (verify-app-editable user (persistence/get-app app-id)) + (validate-app-name app-name app-id (workspace-beta-app-category-id)) (transaction (persistence/update-app app) (let [tool-id (->> app :tools first :id) From 883c6894286bf37e386256d7ca5a3c871e81cb98 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 24 Feb 2016 17:40:52 -0700 Subject: [PATCH 063/183] CORE-3358: modified the POST /admin/apps/categories endpoint to check for duplicate names --- .../src/apps/persistence/app_metadata.clj | 8 ++++++++ .../apps/service/apps/de/categorization.clj | 13 +++++++++++-- .../src/apps/service/apps/de/validation.clj | 19 +++++++++++++++---- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/services/apps/src/apps/persistence/app_metadata.clj b/services/apps/src/apps/persistence/app_metadata.clj index 75266e00a..0f0504c27 100644 --- a/services/apps/src/apps/persistence/app_metadata.clj +++ b/services/apps/src/apps/persistence/app_metadata.clj @@ -692,6 +692,14 @@ (map (juxt :id :name)) (into {}))) +(defn get-app-name + [app-id] + (->> (select :apps + (fields :name) + (where {:id (uuidify app-id)})) + first + :name)) + (defn- user-favorite-subselect [root-category-field faves-idx] (subselect [:app_category_group :acg] diff --git a/services/apps/src/apps/service/apps/de/categorization.clj b/services/apps/src/apps/service/apps/de/categorization.clj index 109313d34..1635c6490 100644 --- a/services/apps/src/apps/service/apps/de/categorization.clj +++ b/services/apps/src/apps/service/apps/de/categorization.clj @@ -4,7 +4,9 @@ [kameleon.app-groups] [kameleon.entities] [apps.validation] - [slingshot.slingshot :only [throw+]])) + [slingshot.slingshot :only [throw+]]) + (:require [apps.persistence.app-metadata :as ap] + [apps.service.apps.de.validation :as av])) (defn- categorize-app "Associates an app with an app category." @@ -48,11 +50,18 @@ :path path})) (dorun (map (partial validate-category-id path) category-ids))) +(defn- validate-app-name + "Validates the app name to ensure that there are no apps with the same name in any of the + destination categories." + [app-id category-ids path] + (av/validate-app-name-with-path (ap/get-app-name app-id) category-ids path)) + (defn- validate-category "Validates each categorized app in the request." [{app-id :app_id category-ids :category_ids :as category} path] (validate-app-info app-id path) - (validate-category-ids category-ids path)) + (validate-category-ids category-ids path) + (validate-app-name app-id category-ids path)) (defn- validate-request-body "Validates the request body." diff --git a/services/apps/src/apps/service/apps/de/validation.clj b/services/apps/src/apps/service/apps/de/validation.clj index 6eab93868..6d2cd61b9 100644 --- a/services/apps/src/apps/service/apps/de/validation.clj +++ b/services/apps/src/apps/service/apps/de/validation.clj @@ -138,14 +138,25 @@ (verify-app-permission user app "write") (verify-app-not-public app)) +(def ^:private duplicate-app-selected-categories-msg + "An app with the same name already exists in one of the selected categories.") + +(def ^:private duplicate-app-existing-categories-msg + "An app with the same name already exists in one of the same categories.") + (defn validate-app-name "Verifies that an app with the same name doesn't already exist in any of the same app categories. 
The beta category is treated as an exception because it's intended to be a staging area for new apps." ([app-name category-ids] (when (seq (list-duplicate-apps app-name category-ids)) - (exists "An app with the same name already exists in one of the selected categories." - :app_name app-name :category_ids category-ids))) + (exists duplicate-app-selected-categories-msg :app_name app-name :category_ids category-ids))) ([app-name app-id beta-category-id] (when (seq (list-duplicate-apps app-name app-id beta-category-id)) - (exists "An app with the same name already exists in one of the same categories." - :app_name app-name :app_id app-id)))) + (exists duplicate-app-existing-categories-msg :app_name app-name :app_id app-id)))) + +(defn validate-app-name-with-path + "Verifies that an app with the same name doesn't already exist in any of the same app categories. The beta + category is treated as an exception because it's intended to be a staging area for new apps." + [app-name category-ids path] + (when (seq (list-duplicate-apps app-name category-ids)) + (exists duplicate-app-selected-categories-msg :app_name app-name :category_ids category-ids :path path))) From e7f2e04aca4b63f1c83088ba0868a0aca0874a1e Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 24 Feb 2016 18:12:30 -0700 Subject: [PATCH 064/183] CORE-3358: modified the PATCH /admin/apps/:app-id endpoint to check for duplicate app names --- services/apps/src/apps/service/apps/de/admin.clj | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/services/apps/src/apps/service/apps/de/admin.clj b/services/apps/src/apps/service/apps/de/admin.clj index f9b98027d..04bb62e71 100644 --- a/services/apps/src/apps/service/apps/de/admin.clj +++ b/services/apps/src/apps/service/apps/de/admin.clj @@ -2,11 +2,12 @@ (:use [korma.db :only [transaction]] [apps.persistence.app-metadata.relabel :only [update-app-labels]] [apps.util.assertions :only [assert-not-nil]] - [apps.util.config :only [workspace-public-id]] + [apps.util.config :only [workspace-public-id workspace-beta-app-category-id]] [slingshot.slingshot :only [throw+]]) (:require [clojure.tools.logging :as log] [kameleon.app-groups :as app-groups] - [apps.persistence.app-metadata :as persistence])) + [apps.persistence.app-metadata :as persistence] + [apps.service.apps.de.validation :as av])) (def ^:private max-app-category-name-len 255) @@ -90,8 +91,10 @@ (defn update-app "This service updates high-level details and labels in an App, and can mark or unmark the app as deleted or disabled in the database." - [{app-id :id :as app}] + [{app-name :name app-id :id :as app}] (validate-app-existence app-id) + (when-not (nil? app-name) + (av/validate-app-name app-name app-id (workspace-beta-app-category-id))) (transaction (if (empty? 
(select-keys app [:name :description :wiki_url :references :groups])) (update-app-deleted-disabled app) From 8cfb90aa454673a1d9cc73c74f14eb2db96ceb7e Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 24 Feb 2016 19:02:30 -0700 Subject: [PATCH 065/183] CORE-3358: fixed a bug that prevented apps from being updated when their names were not changed --- .../src/apps/persistence/app_metadata.clj | 14 ++++++----- .../apps/service/apps/de/categorization.clj | 5 ++-- .../apps/src/apps/service/apps/de/edit.clj | 5 ++-- .../src/apps/service/apps/de/validation.clj | 25 +++++++++---------- 4 files changed, 26 insertions(+), 23 deletions(-) diff --git a/services/apps/src/apps/persistence/app_metadata.clj b/services/apps/src/apps/persistence/app_metadata.clj index 0f0504c27..bf9f9f416 100644 --- a/services/apps/src/apps/persistence/app_metadata.clj +++ b/services/apps/src/apps/persistence/app_metadata.clj @@ -721,13 +721,14 @@ :l.id [not= (user-favorite-subselect :w.root_category_id faves-idx)]}))))) (defn- list-duplicate-apps* - [app-name category-id-set] + [app-name app-id category-id-set] (select [:apps :a] (fields :a.id :a.name :a.description) (join [:app_category_app :aca] {:a.id :aca.app_id}) (where {(raw "trim(both from a.name)") (string/trim app-name) :a.deleted false - :aca.app_category_id [in category-id-set]}))) + :aca.app_category_id [in category-id-set] + :a.id [not= app-id]}))) (defn- app-category-id-subselect [app-id beta-app-category-id] @@ -738,7 +739,8 @@ (defn list-duplicate-apps "List apps with the same name that exist in the same category as the new app." - ([app-name category-ids] - (list-duplicate-apps* app-name category-ids)) - ([app-name app-id beta-app-category-id] - (list-duplicate-apps* app-name (app-category-id-subselect app-id beta-app-category-id)))) + [app-name app-id beta-app-category-id category-ids] + (->> (if (seq category-ids) + (remove (partial = beta-app-category-id) category-ids) + (app-category-id-subselect app-id beta-app-category-id)) + (list-duplicate-apps* app-name app-id))) diff --git a/services/apps/src/apps/service/apps/de/categorization.clj b/services/apps/src/apps/service/apps/de/categorization.clj index 1635c6490..a520180e6 100644 --- a/services/apps/src/apps/service/apps/de/categorization.clj +++ b/services/apps/src/apps/service/apps/de/categorization.clj @@ -6,7 +6,8 @@ [apps.validation] [slingshot.slingshot :only [throw+]]) (:require [apps.persistence.app-metadata :as ap] - [apps.service.apps.de.validation :as av])) + [apps.service.apps.de.validation :as av] + [apps.util.config :as cfg])) (defn- categorize-app "Associates an app with an app category." @@ -54,7 +55,7 @@ "Validates the app name to ensure that there are no apps with the same name in any of the destination categories." [app-id category-ids path] - (av/validate-app-name-with-path (ap/get-app-name app-id) category-ids path)) + (av/validate-app-name (ap/get-app-name app-id) app-id (cfg/workspace-beta-app-category-id) category-ids path)) (defn- validate-category "Validates each categorized app in the request." diff --git a/services/apps/src/apps/service/apps/de/edit.clj b/services/apps/src/apps/service/apps/de/edit.clj index 3567be3c6..d84e7224c 100644 --- a/services/apps/src/apps/service/apps/de/edit.clj +++ b/services/apps/src/apps/service/apps/de/edit.clj @@ -386,9 +386,10 @@ (defn add-app "This service will add a single-step App, including the information at its top level." 
[{:keys [username] :as user} {app-name :name :keys [references groups] :as app}]
-  (validate-app-name app-name [(get-user-subcategory username (workspace-dev-app-category-index))])
   (transaction
-    (let [app-id  (:id (persistence/add-app app))
+    (let [cat-id  (get-user-subcategory username (workspace-dev-app-category-index))
+          _       (validate-app-name app-name nil (workspace-beta-app-category-id) [cat-id])
+          app-id  (:id (persistence/add-app app))
           tool-id (->> app :tools first :id)
           task-id (-> (assoc app :id app-id :tool_id tool-id)
                       (add-single-step-task)
diff --git a/services/apps/src/apps/service/apps/de/validation.clj b/services/apps/src/apps/service/apps/de/validation.clj
index 6d2cd61b9..e9e511d4e 100644
--- a/services/apps/src/apps/service/apps/de/validation.clj
+++ b/services/apps/src/apps/service/apps/de/validation.clj
@@ -147,16 +147,15 @@
 (defn validate-app-name
   "Verifies that an app with the same name doesn't already exist in any of the same app categories. The beta
    category is treated as an exception because it's intended to be a staging area for new apps."
-  ([app-name category-ids]
-   (when (seq (list-duplicate-apps app-name category-ids))
-     (exists duplicate-app-selected-categories-msg :app_name app-name :category_ids category-ids)))
-  ([app-name app-id beta-category-id]
-   (when (seq (list-duplicate-apps app-name app-id beta-category-id))
-     (exists duplicate-app-existing-categories-msg :app_name app-name :app_id app-id))))
-
-(defn validate-app-name-with-path
-  "Verifies that an app with the same name doesn't already exist in any of the same app categories. The beta
-   category is treated as an exception because it's intended to be a staging area for new apps."
-  [app-name category-ids path]
-  (when (seq (list-duplicate-apps app-name category-ids))
-    (exists duplicate-app-selected-categories-msg :app_name app-name :category_ids category-ids :path path)))
+  ([app-name app-id beta-app-category-id]
+   (validate-app-name app-name app-id beta-app-category-id nil))
+  ([app-name app-id beta-app-category-id category-ids]
+   (when (seq (list-duplicate-apps app-name app-id beta-app-category-id category-ids))
+     (if (seq category-ids)
+       (exists duplicate-app-selected-categories-msg :app_name app-name :category_ids category-ids)
+       (exists duplicate-app-existing-categories-msg :app_name app-name :app_id app-id))))
+  ([app-name app-id beta-app-category-id category-ids path]
+   (when (seq (list-duplicate-apps app-name app-id beta-app-category-id category-ids))
+     (if (seq category-ids)
+       (exists duplicate-app-selected-categories-msg :app_name app-name :category_ids category-ids :path path)
+       (exists duplicate-app-existing-categories-msg :app_name app-name :app_id app-id :path path)))))

From bf0b06c497d6f94b926ab944d10bf7496dadc486 Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Thu, 25 Feb 2016 13:49:23 -0700
Subject: [PATCH 066/183] CORE-3358: implemented some code review suggestions

---
 libs/kameleon/src/kameleon/util.clj                 |  7 +++++++
 services/apps/src/apps/persistence/app_metadata.clj |  9 +++++----
 services/apps/src/apps/service/apps/de/admin.clj    |  6 +++---
 3 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/libs/kameleon/src/kameleon/util.clj b/libs/kameleon/src/kameleon/util.clj
index 0662e9d91..250e00bb5 100644
--- a/libs/kameleon/src/kameleon/util.clj
+++ b/libs/kameleon/src/kameleon/util.clj
@@ -6,3 +6,10 @@
   [desc query]
   (log/debug desc (sql-only (select query)))
   query)
+
+(defn normalize-string
+  "Normalizes a string for use in comparisons.
Comparisons in which this function is used on both sides will be + case-insensitive with leading and trailing whitespace removed and consecutive whitespace collapsed to a single + space." + [s] + (sqlfn lower (sqlfn regexp_replace (sqlfn trim s) "\\s+" " "))) diff --git a/services/apps/src/apps/persistence/app_metadata.clj b/services/apps/src/apps/persistence/app_metadata.clj index bf9f9f416..7c18c468b 100644 --- a/services/apps/src/apps/persistence/app_metadata.clj +++ b/services/apps/src/apps/persistence/app_metadata.clj @@ -1,6 +1,7 @@ (ns apps.persistence.app-metadata "Persistence layer for app metadata." (:use [kameleon.entities] + [kameleon.util :only [normalize-string]] [kameleon.uuids :only [uuidify]] [korma.core :exclude [update]] [korma.db :only [transaction]] @@ -725,10 +726,10 @@ (select [:apps :a] (fields :a.id :a.name :a.description) (join [:app_category_app :aca] {:a.id :aca.app_id}) - (where {(raw "trim(both from a.name)") (string/trim app-name) - :a.deleted false - :aca.app_category_id [in category-id-set] - :a.id [not= app-id]}))) + (where {(normalize-string :a.name) (normalize-string app-name) + :a.deleted false + :aca.app_category_id [in category-id-set] + :a.id [not= app-id]}))) (defn- app-category-id-subselect [app-id beta-app-category-id] diff --git a/services/apps/src/apps/service/apps/de/admin.clj b/services/apps/src/apps/service/apps/de/admin.clj index 04bb62e71..39c6a5839 100644 --- a/services/apps/src/apps/service/apps/de/admin.clj +++ b/services/apps/src/apps/service/apps/de/admin.clj @@ -92,10 +92,10 @@ "This service updates high-level details and labels in an App, and can mark or unmark the app as deleted or disabled in the database." [{app-name :name app-id :id :as app}] - (validate-app-existence app-id) - (when-not (nil? app-name) - (av/validate-app-name app-name app-id (workspace-beta-app-category-id))) (transaction + (validate-app-existence app-id) + (when-not (nil? app-name) + (av/validate-app-name app-name app-id (workspace-beta-app-category-id))) (if (empty? (select-keys app [:name :description :wiki_url :references :groups])) (update-app-deleted-disabled app) (update-app-details app)))) From 7fff047943c332a57c4ea8e4894c0a806fab01f1 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Thu, 25 Feb 2016 15:29:15 -0700 Subject: [PATCH 067/183] CORE-3358: fixed a couple of potential race conditions --- services/apps/src/apps/service/apps/de/categorization.clj | 5 +++-- services/apps/src/apps/service/apps/de/edit.clj | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/services/apps/src/apps/service/apps/de/categorization.clj b/services/apps/src/apps/service/apps/de/categorization.clj index a520180e6..ffa53a17d 100644 --- a/services/apps/src/apps/service/apps/de/categorization.clj +++ b/services/apps/src/apps/service/apps/de/categorization.clj @@ -73,5 +73,6 @@ (defn categorize-apps "A service that categorizes one or more apps in the database." [{:keys [categories] :as body}] - (validate-request-body body) - (transaction (dorun (map categorize-app categories)))) + (transaction + (validate-request-body body) + (dorun (map categorize-app categories)))) diff --git a/services/apps/src/apps/service/apps/de/edit.clj b/services/apps/src/apps/service/apps/de/edit.clj index d84e7224c..e45076bc6 100644 --- a/services/apps/src/apps/service/apps/de/edit.clj +++ b/services/apps/src/apps/service/apps/de/edit.clj @@ -349,8 +349,8 @@ tool used by its single task, as long as the App has not been submitted for public use." 
[user {app-id :id app-name :name :keys [references groups] :as app}] (verify-app-editable user (persistence/get-app app-id)) - (validate-app-name app-name app-id (workspace-beta-app-category-id)) (transaction + (validate-app-name app-name app-id (workspace-beta-app-category-id)) (persistence/update-app app) (let [tool-id (->> app :tools first :id) app-task (->> (get-app-details app-id) :tasks first) @@ -462,8 +462,8 @@ "This service allows labels to be updated in any app, whether or not the app has been submitted for public use." [user {app-name :name app-id :id :as body}] - (validate-app-name app-name app-id (workspace-beta-app-category-id)) (let [app (persistence/get-app app-id)] + (validate-app-name app-name app-id (workspace-beta-app-category-id)) (when-not (user-owns-app? user app) (verify-app-permission user app "write"))) (transaction (persistence/update-app-labels body)) From 50e1f105e2fab77b2b0d064bad11d15f91c34406 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Thu, 25 Feb 2016 15:49:32 -0700 Subject: [PATCH 068/183] CORE-3358: fixed yet another editing bug --- services/apps/src/apps/service/apps/de/edit.clj | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/services/apps/src/apps/service/apps/de/edit.clj b/services/apps/src/apps/service/apps/de/edit.clj index e45076bc6..23c55ee8b 100644 --- a/services/apps/src/apps/service/apps/de/edit.clj +++ b/services/apps/src/apps/service/apps/de/edit.clj @@ -463,8 +463,9 @@ for public use." [user {app-name :name app-id :id :as body}] (let [app (persistence/get-app app-id)] - (validate-app-name app-name app-id (workspace-beta-app-category-id)) (when-not (user-owns-app? user app) (verify-app-permission user app "write"))) - (transaction (persistence/update-app-labels body)) + (transaction + (validate-app-name app-name app-id (workspace-beta-app-category-id)) + (persistence/update-app-labels body)) (get-app-ui user app-id)) From de1cd7914fba8acd4cef68b191cc37c94902c3ed Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Thu, 25 Feb 2016 17:44:20 -0700 Subject: [PATCH 069/183] CORE-7180: A partial implementation of using Agave app search for the analysis listing endpoints. This still needs to be tested --- libs/mescal/src/mescal/agave_de_v2.clj | 6 ++++-- .../src/mescal/agave_de_v2/app_listings.clj | 17 ++++++++++------- libs/mescal/src/mescal/agave_v2.clj | 9 ++++++--- libs/mescal/src/mescal/core.clj | 5 ++++- libs/mescal/src/mescal/de.clj | 4 +++- services/apps/src/apps/service/apps/agave.clj | 8 ++++---- .../src/apps/service/apps/agave/listings.clj | 2 +- 7 files changed, 32 insertions(+), 19 deletions(-) diff --git a/libs/mescal/src/mescal/agave_de_v2.clj b/libs/mescal/src/mescal/agave_de_v2.clj index c6d2ce70c..f281780a3 100644 --- a/libs/mescal/src/mescal/agave_de_v2.clj +++ b/libs/mescal/src/mescal/agave_de_v2.clj @@ -14,8 +14,10 @@ (into {} (map (juxt :id :status) (.listSystems agave)))) (defn list-apps - [agave jobs-enabled?] - (app-listings/list-apps agave (get-system-statuses agave) jobs-enabled?)) + ([agave jobs-enabled?] + (app-listings/list-apps agave (get-system-statuses agave) jobs-enabled?)) + ([agave jbos-enabled? app-ids] + (app-listings/list-apps agave (get-system-statuses agave) jobs-enabled? app-ids))) (defn- app-matches? 
[search-term app] diff --git a/libs/mescal/src/mescal/agave_de_v2/app_listings.clj b/libs/mescal/src/mescal/agave_de_v2/app_listings.clj index ada030b46..c9366a278 100644 --- a/libs/mescal/src/mescal/agave_de_v2/app_listings.clj +++ b/libs/mescal/src/mescal/agave_de_v2/app_listings.clj @@ -38,11 +38,14 @@ :permission "read" :wiki_url ""})) +(defn- format-app-listing-response + [listing statuses jobs-enabled?] + (assoc (hpc-app-group) + :apps (map (partial format-app-listing statuses jobs-enabled?) listing) + :app_count (count listing))) + (defn list-apps - [agave statuses jobs-enabled?] - (let [listing (.listApps agave) - total (count listing) - listing (map (partial format-app-listing statuses jobs-enabled?) listing)] - (assoc (hpc-app-group) - :apps listing - :app_count total))) + ([agave statuses jobs-enabled?] + (format-app-listing-response (.listApps agave) statuses jobs-enabled?)) + ([agave statuses jobs-enabled? app-ids] + (format-app-listing-response (.listApps agave app-ids) statuses jobs-enabled?))) diff --git a/libs/mescal/src/mescal/agave_v2.clj b/libs/mescal/src/mescal/agave_v2.clj index 9134427a2..14845deca 100644 --- a/libs/mescal/src/mescal/agave_v2.clj +++ b/libs/mescal/src/mescal/agave_v2.clj @@ -11,7 +11,7 @@ [mescal.util :as util]) (:import [java.io IOException])) -; FIXME Update apps service exception handling when this exception handling is updated + ; FIXME Update apps service exception handling when this exception handling is updated (defn- agave-unavailable [e] (let [msg "Agave appears to be unavailable at this time"] @@ -95,8 +95,11 @@ (agave-get token-info-fn timeout (curl/url base-url "/systems/v2/" system-name))) (defn list-apps - [base-url token-info-fn timeout page-len] - (agave-get token-info-fn timeout (curl/url base-url "/apps/v2/") {:page-len page-len})) + ([base-url token-info-fn timeout page-len] + (agave-get token-info-fn timeout (curl/url base-url "/apps/v2/") {:page-len page-len})) + ([base-url token-info-fn timeout page-len app-ids] + (->> {:page-len page-len :id.in (string/join "," app-ids)} + (agave-get token-info-fn timeout (curl/url base-url "/apps/v2/"))))) (defn get-app [base-url token-info-fn timeout app-id] diff --git a/libs/mescal/src/mescal/core.clj b/libs/mescal/src/mescal/core.clj index 4c1c85405..1492d52af 100644 --- a/libs/mescal/src/mescal/core.clj +++ b/libs/mescal/src/mescal/core.clj @@ -5,7 +5,7 @@ "A client for the Agave API." (listSystems [_]) (getSystemInfo [_ system-name]) - (listApps [_]) + (listApps [_] [_ app-ids]) (getApp [_ app-id]) (submitJob [_ submission]) (listJobs [_] [_ job-ids]) @@ -29,6 +29,9 @@ (listApps [_] (v2/check-access-token token-info-fn timeout) (v2/list-apps base-url token-info-fn timeout page-len)) + (listApps [_ app-ids] + (v2/check-access-token token-info-fn timeout) + (v2/list-apps base-url token-info-fn timeout page-len app-ids)) (getApp [_ app-id] (v2/check-access-token token-info-fn timeout) (v2/get-app base-url token-info-fn timeout app-id)) diff --git a/libs/mescal/src/mescal/de.clj b/libs/mescal/src/mescal/de.clj index ad2c1bc72..f5f2be9fa 100644 --- a/libs/mescal/src/mescal/de.clj +++ b/libs/mescal/src/mescal/de.clj @@ -5,7 +5,7 @@ (defprotocol DeAgaveClient "An Agave client with customizations that are specific to the discovery environment." 
(hpcAppGroup [_]) - (listApps [_]) + (listApps [_] [_ app-ids]) (searchApps [_ search-term]) (getApp [_ app-id]) (getAppDetails [_ app-id]) @@ -30,6 +30,8 @@ (v2/hpc-app-group)) (listApps [_] (v2/list-apps agave jobs-enabled?)) + (listApps [_ app-ids] + (v2/list-apps agave jobs-enabled? app-ids)) (searchApps [_ search-term] (v2/search-apps agave jobs-enabled? search-term)) (getApp [_ app-id] diff --git a/services/apps/src/apps/service/apps/agave.clj b/services/apps/src/apps/service/apps/agave.clj index 7c4f128e2..47c4c9c2a 100644 --- a/services/apps/src/apps/service/apps/agave.clj +++ b/services/apps/src/apps/service/apps/agave.clj @@ -81,10 +81,10 @@ (job-listings/list-jobs self user params)) (loadAppTables [_ app-ids] - (if (and (user-has-access-token?) - (some (complement util/uuid?) app-ids)) - (listings/load-app-tables agave) - [])) + (let [agave-app-ids (remove util/uuid? app-ids)] + (if (and (seq agave-app-ids) (user-has-access-token?)) + (listings/load-app-tables agave agave-app-ids) + []))) (submitJob [this submission] (when-not (util/uuid? (:app_id submission)) diff --git a/services/apps/src/apps/service/apps/agave/listings.clj b/services/apps/src/apps/service/apps/agave/listings.clj index afa272014..bf48a936d 100644 --- a/services/apps/src/apps/service/apps/agave/listings.clj +++ b/services/apps/src/apps/service/apps/agave/listings.clj @@ -27,7 +27,7 @@ nil))) (defn load-app-tables - [agave] + [agave app-ids] (try+ (->> (.listApps agave) (:apps) From 8563d6198d8831535858c1f0882287941ce1e381 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 26 Feb 2016 16:59:25 -0700 Subject: [PATCH 070/183] CORE-7180: created an easier way to test mescal in the REPL --- libs/mescal/src/mescal/test.clj | 79 +++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 libs/mescal/src/mescal/test.clj diff --git a/libs/mescal/src/mescal/test.clj b/libs/mescal/src/mescal/test.clj new file mode 100644 index 000000000..75b400850 --- /dev/null +++ b/libs/mescal/src/mescal/test.clj @@ -0,0 +1,79 @@ +(ns mescal.test + (:require [authy.core :as authy] + [cemerick.url :as curl] + [mescal.core :as mc] + [mescal.de :as md])) + +(defn- get-agave-base-url [] + (System/getenv "AGAVE_BASE_URL")) + +(defn- get-agave-storage-system [] + (System/getenv "AGAVE_STORAGE_SYSTEM")) + +(defn- get-api-key [] + (System/getenv "AGAVE_API_KEY")) + +(defn- get-api-secret [] + (System/getenv "AGAVE_API_SECRET")) + +(defn- prompt-for-username [] + (print "username: ") + (flush) + (read-line)) + +(defn- prompt-for-password [] + (print "password: ") + (flush) + (.. 
System console readPassword)) + +(defn- get-username [] + (or (System/getenv "IPLANT_CAS_SHORT") + (prompt-for-username))) + +(defn- get-password [] + (or (System/getenv "IPLANT_CAS_PASS") + (prompt-for-password))) + +(defn- get-oauth-info [base-url api-key api-secret] + {:api-name "agave" + :client-key api-key + :client-secret api-secret + :token-uri (str (curl/url base-url "oauth2" "token"))}) + +(defn- get-token [base-url api-key api-secret username password] + (let [oauth-info (get-oauth-info base-url api-key api-secret)] + (authy/get-access-token-for-credentials oauth-info username password))) + +(defn get-test-agave-client + ([] + (get-test-agave-client {})) + ([agave-params] + (get-test-agave-client agave-params (get-username))) + ([agave-params username] + (get-test-agave-client agave-params username (get-password))) + ([agave-params username password] + (get-test-agave-client agave-params username password (get-api-key) (get-api-secret))) + ([agave-params username password api-key api-secret] + (let [base-url (get-agave-base-url) + storage-system (get-agave-storage-system) + token-info (get-token base-url api-key api-secret username password) + agave-params (flatten (seq agave-params))] + (apply mc/agave-client-v2 base-url storage-system (constantly token-info) agave-params)))) + +(defn get-test-de-agave-client + ([] + (get-test-de-agave-client {})) + ([agave-params] + (get-test-de-agave-client agave-params true)) + ([agave-params jobs-enabled?] + (get-test-de-agave-client agave-params jobs-enabled? (get-username))) + ([agave-params jobs-enabled? username] + (get-test-de-agave-client agave-params jobs-enabled? username (get-password))) + ([agave-params jobs-enabled? username password] + (get-test-de-agave-client agave-params jobs-enabled? username password (get-api-key) (get-api-secret))) + ([agave-params jobs-enabled? username password api-key api-secret] + (let [base-url (get-agave-base-url) + storage-system (get-agave-storage-system) + token-info (get-token base-url api-key api-secret username password) + agave-params (flatten (seq agave-params))] + (apply md/de-agave-client-v2 base-url storage-system (constantly token-info) jobs-enabled? agave-params)))) From 19f41af6444d5d3bac9359571ece3198723dc2bb Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 26 Feb 2016 17:01:01 -0700 Subject: [PATCH 071/183] CORE-7180: added a limit for the maximum number of items in a search query parameter that is sent to Agave --- libs/mescal/src/mescal/agave_de_v2.clj | 2 +- libs/mescal/src/mescal/agave_v2.clj | 12 ++++++------ libs/mescal/src/mescal/core.clj | 15 +++++++++------ 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/libs/mescal/src/mescal/agave_de_v2.clj b/libs/mescal/src/mescal/agave_de_v2.clj index f281780a3..c684f7c40 100644 --- a/libs/mescal/src/mescal/agave_de_v2.clj +++ b/libs/mescal/src/mescal/agave_de_v2.clj @@ -16,7 +16,7 @@ (defn list-apps ([agave jobs-enabled?] (app-listings/list-apps agave (get-system-statuses agave) jobs-enabled?)) - ([agave jbos-enabled? app-ids] + ([agave jobs-enabled? app-ids] (app-listings/list-apps agave (get-system-statuses agave) jobs-enabled? app-ids))) (defn- app-matches? 
diff --git a/libs/mescal/src/mescal/agave_v2.clj b/libs/mescal/src/mescal/agave_v2.clj index 14845deca..d93d00e4a 100644 --- a/libs/mescal/src/mescal/agave_v2.clj +++ b/libs/mescal/src/mescal/agave_v2.clj @@ -11,7 +11,7 @@ [mescal.util :as util]) (:import [java.io IOException])) - ; FIXME Update apps service exception handling when this exception handling is updated +; FIXME Update apps service exception handling when this exception handling is updated (defn- agave-unavailable [e] (let [msg "Agave appears to be unavailable at this time"] @@ -60,19 +60,19 @@ :socket-timeout timeout})))) (defn- agave-get-paged - [token-info-fn timeout page-len url] + [token-info-fn timeout page-len url & [params]] (->> (iterate (partial + page-len) 0) - (map (partial hash-map :limit page-len :offset)) + (map (partial assoc (or params {}) :limit page-len :offset)) (map (partial agave-get* token-info-fn timeout url)) (take-upto (comp (partial > page-len) count)) (apply concat))) (defn agave-get - [token-info-fn timeout url & [{:keys [page-len]}]] + [token-info-fn timeout url & [{:keys [page-len] :as params}]] (set-ext-svc-tag! "agave") (if page-len - (agave-get-paged token-info-fn timeout page-len url) - (agave-get* token-info-fn timeout url))) + (agave-get-paged token-info-fn timeout page-len url (dissoc params :page-len)) + (agave-get* token-info-fn timeout url params))) (defn agave-post [token-info-fn timeout url body] diff --git a/libs/mescal/src/mescal/core.clj b/libs/mescal/src/mescal/core.clj index 1492d52af..3f3f4636f 100644 --- a/libs/mescal/src/mescal/core.clj +++ b/libs/mescal/src/mescal/core.clj @@ -18,7 +18,7 @@ (agaveFilePath [_ file-url]) (storageSystem [_])) -(deftype AgaveClientV2 [base-url storage-system token-info-fn timeout page-len] +(deftype AgaveClientV2 [base-url storage-system token-info-fn timeout page-len max-query-items] AgaveClient (listSystems [_] (v2/check-access-token token-info-fn timeout) @@ -31,7 +31,9 @@ (v2/list-apps base-url token-info-fn timeout page-len)) (listApps [_ app-ids] (v2/check-access-token token-info-fn timeout) - (v2/list-apps base-url token-info-fn timeout page-len app-ids)) + (if (> (count app-ids) max-query-items) + (v2/list-apps base-url token-info-fn timeout page-len) + (v2/list-apps base-url token-info-fn timeout page-len app-ids))) (getApp [_ app-id] (v2/check-access-token token-info-fn timeout) (v2/get-app base-url token-info-fn timeout app-id)) @@ -69,8 +71,9 @@ storage-system)) (defn agave-client-v2 - [base-url storage-system token-info-fn & {:keys [timeout page-len] - :or {timeout 5000 - page-len 100}}] + [base-url storage-system token-info-fn & {:keys [timeout page-len max-query-items] + :or {timeout 5000 + page-len 100 + max-query-items 50}}] (let [token-info-wrapper-fn (memoize #(ref (token-info-fn)))] - (AgaveClientV2. base-url storage-system token-info-wrapper-fn timeout page-len))) + (AgaveClientV2. 
base-url storage-system token-info-wrapper-fn timeout page-len max-query-items)))

From 6b79567b91d8da9fc5662ee9e154b850539d2458 Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Fri, 26 Feb 2016 18:00:10 -0700
Subject: [PATCH 072/183] CORE-7180: fixed a missed code modification

---
 services/apps/src/apps/service/apps/agave/listings.clj | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/services/apps/src/apps/service/apps/agave/listings.clj b/services/apps/src/apps/service/apps/agave/listings.clj
index bf48a936d..5c6a1f44c 100644
--- a/services/apps/src/apps/service/apps/agave/listings.clj
+++ b/services/apps/src/apps/service/apps/agave/listings.clj
@@ -29,7 +29,7 @@
 (defn load-app-tables
   [agave app-ids]
   (try+
-    (->> (.listApps agave)
+    (->> (.listApps agave app-ids)
         (:apps)
         (map (juxt :id identity))
         (into {})

From 71d793629f55b329b880ac5a098abb3dd70c167a Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Fri, 26 Feb 2016 18:23:45 -0700
Subject: [PATCH 073/183] CORE-7180: removed the optional username and password prompts from mescal.test since they don't work in the REPL

---
 libs/mescal/src/mescal/test.clj | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/libs/mescal/src/mescal/test.clj b/libs/mescal/src/mescal/test.clj
index 75b400850..403638813 100644
--- a/libs/mescal/src/mescal/test.clj
+++ b/libs/mescal/src/mescal/test.clj
@@ -16,23 +16,11 @@
 (defn- get-api-secret []
   (System/getenv "AGAVE_API_SECRET"))
 
-(defn- prompt-for-username []
-  (print "username: ")
-  (flush)
-  (read-line))
-
-(defn- prompt-for-password []
-  (print "password: ")
-  (flush)
-  (.. System console readPassword))
-
 (defn- get-username []
-  (or (System/getenv "IPLANT_CAS_SHORT")
-      (prompt-for-username)))
+  (System/getenv "IPLANT_CAS_SHORT"))
 
 (defn- get-password []
-  (or (System/getenv "IPLANT_CAS_PASS")
-      (prompt-for-password)))
+  (System/getenv "IPLANT_CAS_PASS"))

From 80dd82a2bedea9862907740a2fe23ccb60a0a37c Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Mon, 29 Feb 2016 16:17:26 -0700
Subject: [PATCH 074/183] CORE-3599: updated the notification agent documentation in a couple of places

---
 services/NotificationAgent/README.markdown | 40 ++++++++++++++++++++--
 1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/services/NotificationAgent/README.markdown b/services/NotificationAgent/README.markdown
index 2c57daf2b..c24b8c9ec 100644
--- a/services/NotificationAgent/README.markdown
+++ b/services/NotificationAgent/README.markdown
@@ -368,7 +368,22 @@
 Marking a notification as seen prevents it from being returned by the
 `/unseen-messages` endpoint. The intent is for this endpoint to be called when
 the user has seen a notification for the first time. This services requires a
 `user` query parameter that provides the name of the user who is marking these
 messages as
-seen. This service accepts a request body in the following format:
+seen. This service accepts the following query parameters:
+
+<table>
+    <thead>
+        <tr><th>Name</th><th>Description</th><th>Required/Optional</th></tr>
+    </thead>
+    <tbody>
+        <tr><td>user</td><td>The name of the user to mark notifications as seen for.</td><td>Required</td></tr>
+    </tbody>
+</table>
+ +This service accepts a request body in the following format. ```json { @@ -403,6 +418,9 @@ $ curl -sd ' } ``` +Note that the UUIDs provided in the request body must be obtained from the +message -> id element of the notification the user wishes to mark as seen. + ### Marking All Notifications as Seen * Endpoint: POST /mark-all-seen @@ -445,8 +463,24 @@ $ curl -sd ' "Deleting" a notification entails marking the notification as deleted in the notification database so that it won't be returned by either the `/messages` -service or the `/unseen-messages` service. This service accepts a request body -in the following format: +service or the `/unseen-messages` service. This service accepts the following +query parameters: + + + + + + + + + + + + +
+<table>
+    <thead>
+        <tr><th>Name</th><th>Description</th><th>Required/Optional</th></tr>
+    </thead>
+    <tbody>
+        <tr><td>user</td><td>The name of the user to delete notifications for.</td><td>Required</td></tr>
+    </tbody>
+</table>
+ + +This service accepts a request body in the following format: ```json { From fb234005598f8d68cd9b95f3edeeb9979c613528 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 29 Feb 2016 17:53:21 -0700 Subject: [PATCH 075/183] CORE-4531: added the debug flag to the response body the GET /analyses/:id/relaunch-info endpoint in apps --- services/apps/src/apps/routes/domain/app.clj | 2 ++ services/apps/src/apps/service/apps/jobs/params.clj | 13 +++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/services/apps/src/apps/routes/domain/app.clj b/services/apps/src/apps/routes/domain/app.clj index 25cf21879..50c71fd79 100644 --- a/services/apps/src/apps/routes/domain/app.clj +++ b/services/apps/src/apps/routes/domain/app.clj @@ -17,6 +17,7 @@ (def AppPublicParam (describe Boolean "Whether the App has been published and is viewable by all users")) +(def OptionalDebugKey (optional-key :debug)) (def OptionalGroupsKey (optional-key :groups)) (def OptionalParametersKey (optional-key :parameters)) (def OptionalParameterArgumentsKey (optional-key :arguments)) @@ -243,6 +244,7 @@ :label (describe String "An alias for the App's name") :deleted AppDeletedParam :disabled AppDisabledParam + OptionalDebugKey (describe Boolean "True if input files should be retained for the job by default.") OptionalGroupsKey (describe [AppGroupJobView] GroupListDocs))) (defschema AppDetailCategory diff --git a/services/apps/src/apps/service/apps/jobs/params.clj b/services/apps/src/apps/service/apps/jobs/params.clj index 672f336b3..7beb42db0 100644 --- a/services/apps/src/apps/service/apps/jobs/params.clj +++ b/services/apps/src/apps/service/apps/jobs/params.clj @@ -6,13 +6,13 @@ [apps.persistence.app-metadata :as ap] [apps.service.util :as util])) -(defn- get-job-submission-config +(defn- get-job-submission [job] (let [submission (:submission job)] (when-not submission (throw+ {:type :clojure-commons.exception/not-found :error "Job submission values could not be found."})) - (:config (cheshire/decode (.getValue submission) true)))) + (cheshire/decode (.getValue submission) true))) (defn- load-mapped-params [app-id] @@ -98,7 +98,7 @@ (defn get-parameter-values [apps-client {:keys [app-id] :as job}] - (let [config (get-job-submission-config job)] + (let [config (:config (get-job-submission job))] (->> (.getParamDefinitions apps-client app-id) (remove-mapped-params app-id) (remove omit-param?) 
@@ -127,6 +127,7 @@ (defn get-job-relaunch-info [apps-client job] - (update-in (.getAppJobView apps-client (:app-id job)) - [:groups] - (partial update-app-groups (get-job-submission-config job)))) + (let [submission (get-job-submission job)] + (update-in (assoc (.getAppJobView apps-client (:app-id job)) :debug (:debug submission false)) + [:groups] + (partial update-app-groups (:config submission))))) From ea5952834060636554f010c8718fd2a4643ee537 Mon Sep 17 00:00:00 2001 From: Jonathan Strootman Date: Tue, 1 Mar 2016 16:04:53 -0700 Subject: [PATCH 076/183] Changed ansible `sudo` to `become` for ansible 2.0 --- ansible/db-migrations.yaml | 2 +- ansible/playbooks/de-jex.yaml | 2 +- ansible/playbooks/de-porklock.yaml | 2 +- ansible/playbooks/de-pull-images.yaml | 56 ++++++------ ansible/playbooks/de-rm-containers.yml | 100 +++++++++++----------- ansible/playbooks/de-services-common.yml | 2 +- ansible/playbooks/de-start-containers.yml | 46 +++++----- ansible/playbooks/de-stop-containers.yml | 46 +++++----- 8 files changed, 128 insertions(+), 128 deletions(-) diff --git a/ansible/db-migrations.yaml b/ansible/db-migrations.yaml index 0b431c445..70660c418 100644 --- a/ansible/db-migrations.yaml +++ b/ansible/db-migrations.yaml @@ -3,7 +3,7 @@ hosts: db-proxy:&systems tags: - databases - sudo: False + become: False roles: - role: util-notify-chat diff --git a/ansible/playbooks/de-jex.yaml b/ansible/playbooks/de-jex.yaml index 25f9440e5..91c38b65e 100644 --- a/ansible/playbooks/de-jex.yaml +++ b/ansible/playbooks/de-jex.yaml @@ -1,7 +1,7 @@ --- - name: Configure JEX hosts: jex:&systems - sudo: yes + become: yes tags: - jex pre_tasks: diff --git a/ansible/playbooks/de-porklock.yaml b/ansible/playbooks/de-porklock.yaml index a4c42c04e..c6be9dce7 100644 --- a/ansible/playbooks/de-porklock.yaml +++ b/ansible/playbooks/de-porklock.yaml @@ -1,7 +1,7 @@ --- - name: Deploy porklock hosts: condor:&systems - sudo: True + become: True gather_facts: false tags: - porklock diff --git a/ansible/playbooks/de-pull-images.yaml b/ansible/playbooks/de-pull-images.yaml index 52ae3785a..a7d932ee2 100644 --- a/ansible/playbooks/de-pull-images.yaml +++ b/ansible/playbooks/de-pull-images.yaml @@ -1,7 +1,7 @@ --- # - name: Update iplant-data # hosts: docker-ready:&systems -# sudo: true +# become: true # gather_facts: false # tags: # - services @@ -13,7 +13,7 @@ - name: Update anon-files hosts: anon-files:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -29,7 +29,7 @@ - name: Update apps hosts: apps:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -44,7 +44,7 @@ - name: Update iplant-data-apps hosts: docker-ready:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -56,7 +56,7 @@ - name: Update clockwork hosts: clockwork:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -71,7 +71,7 @@ - name: Update clm hosts: condor-log-monitor:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -86,7 +86,7 @@ - name: Update data-info hosts: data-info:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -101,7 +101,7 @@ - name: Update dewey hosts: dewey:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -118,7 +118,7 @@ - name: Update de-ui hosts: ui:&systems - sudo: true + become: true gather_facts: false tags: - ui @@ -132,7 +132,7 @@ - name: Update iplant-data-de-ui hosts: docker-ready:&systems - sudo: true + become: true gather_facts: false tags: - 
services @@ -144,7 +144,7 @@ - name: Update de-ui-nginx hosts: ui:&systems - sudo: true + become: true gather_facts: false tags: - ui @@ -159,7 +159,7 @@ - name: Update iplant-data-de-ui-nginx hosts: docker-ready:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -171,7 +171,7 @@ - name: Update exim-sender hosts: exim-sender:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -183,7 +183,7 @@ - name: Update info-typer hosts: info-typer:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -200,7 +200,7 @@ - name: Update infosquito hosts: infosquito:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -217,7 +217,7 @@ - name: Update iplant-email hosts: iplant-email:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -232,7 +232,7 @@ - name: Update iplant-groups hosts: iplant-groups:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -247,7 +247,7 @@ - name: Update jex-events hosts: jexevents:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -262,7 +262,7 @@ - name: Update kifshare hosts: kifshare:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -277,7 +277,7 @@ - name: Update metadata hosts: metadata:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -292,7 +292,7 @@ - name: Update monkey hosts: monkey:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -309,7 +309,7 @@ - name: Update notification-agent hosts: notificationagent:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -324,7 +324,7 @@ - name: Update saved-searches hosts: saved-searches:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -339,7 +339,7 @@ - name: Update terrain hosts: terrain:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -354,7 +354,7 @@ - name: Update iplant-data-terrain hosts: docker-ready:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -366,7 +366,7 @@ - name: Update tree-urls hosts: tree-urls:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -381,7 +381,7 @@ - name: Update user-preferences hosts: user-preferences:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -396,7 +396,7 @@ - name: Update user-sessions hosts: user-sessions:&systems - sudo: true + become: true gather_facts: false tags: - services diff --git a/ansible/playbooks/de-rm-containers.yml b/ansible/playbooks/de-rm-containers.yml index 6faf1ae11..3b3023038 100644 --- a/ansible/playbooks/de-rm-containers.yml +++ b/ansible/playbooks/de-rm-containers.yml @@ -1,7 +1,7 @@ --- # - name: Remove iplant-data # hosts: docker-ready:&systems -# sudo: true +# become: true # gather_facts: false # tags: # - services @@ -14,7 +14,7 @@ - name: Remove anon-files hosts: anon-files:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -31,12 +31,12 @@ post_tasks: - name: annihilate anon-files ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{anon_files.service_name_short}} - name: Remove apps hosts: apps:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -57,12 +57,12 @@ post_tasks: - name: annihilate apps ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{apps.service_name_short}} - name: Remove clockwork hosts: clockwork:&systems - sudo: true + become: true gather_facts: 
false tags: - services @@ -79,12 +79,12 @@ post_tasks: - name: annihilate clockwork ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{clockwork.service_name_short}} - name: Remove clm hosts: condor-log-monitor:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -101,16 +101,16 @@ post_tasks: - name: annihilate clm ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{condor_log_monitor.service_name_short}} - name: annihilate clm part 2 ignore_errors: yes - sudo: true + become: true shell: docker rm -v clm - name: Remove data-info hosts: data-info:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -127,12 +127,12 @@ post_tasks: - name: annihilate data-info ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{data_info.service_name_short}} - name: Remove dewey hosts: dewey:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -149,12 +149,12 @@ post_tasks: - name: annihilate dewey ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{dewey.service_name_short}} - name: Remove de-ui hosts: ui:&systems - sudo: true + become: true gather_facts: false tags: - ui @@ -174,12 +174,12 @@ post_tasks: - name: annihilate de-ui ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{de.service_name_short}} - name: Remove de-ui-nginx hosts: ui:&systems - sudo: true + become: true gather_facts: false tags: - ui @@ -200,12 +200,12 @@ post_tasks: - name: annihilate de-ui-nginx ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{de.http_server.service_name_short}} - name: Remove exim-sender hosts: exim-sender:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -218,16 +218,16 @@ post_tasks: - name: annihilate exim-sender ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{exim.service_name_short}} - name: annihilate exim-sender part 2 ignore_errors: yes - sudo: true + become: true shell: docker rm -v exim - name: Remove info-typer hosts: info-typer:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -244,12 +244,12 @@ post_tasks: - name: annihilate info-typer ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{info_typer.service_name_short}} - name: Remove infosquito hosts: infosquito:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -266,12 +266,12 @@ post_tasks: - name: annihilate infosquito ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{infosquito.service_name_short}} - name: Remove iplant-email hosts: iplant-email:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -288,12 +288,12 @@ post_tasks: - name: annihilate iplant-email ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{iplant_email.service_name_short}} - name: Remove iplant-groups hosts: iplant-groups:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -310,12 +310,12 @@ post_tasks: - name: annihilate iplant-groups ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{iplant_groups.service_name_short}} - name: Remove jex-events hosts: jexevents:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -332,16 +332,16 @@ post_tasks: - name: annihilate jex-events ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{jexevents.service_name_short}} - name: annihilate jex-events part 2 ignore_errors: yes - sudo: true + become: true shell: 
docker rm -v jex-events - name: Remove kifshare hosts: kifshare:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -358,12 +358,12 @@ post_tasks: - name: annihilate kifshare ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{kifshare.service_name_short}} - name: Remove metadata hosts: metadata:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -380,12 +380,12 @@ post_tasks: - name: annihilate metadata ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{metadata.service_name_short}} - name: Remove monkey hosts: monkey:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -402,12 +402,12 @@ post_tasks: - name: annihilate monkey ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{monkey.service_name_short}} - name: Remove notification-agent hosts: notificationagent:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -424,12 +424,12 @@ post_tasks: - name: annihilate notificationagent ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{notificationagent.service_name_short}} - name: Remove saved-searches hosts: saved-searches:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -446,12 +446,12 @@ post_tasks: - name: annihilate saved-searches ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{saved_searches.service_name_short}} - name: Remove terrain hosts: terrain:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -472,12 +472,12 @@ post_tasks: - name: annihilate terrain ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{terrain.service_name_short}} - name: Remove tree-urls hosts: tree-urls:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -494,12 +494,12 @@ post_tasks: - name: annihilate tree-urls ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{tree_urls.service_name_short}} - name: Remove user-preferences hosts: user-preferences:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -516,12 +516,12 @@ post_tasks: - name: annihilate user-preferences ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{user_preferences.service_name_short}} - name: Remove user-sessions hosts: user-sessions:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -538,5 +538,5 @@ post_tasks: - name: annihilate user-sessions ignore_errors: yes - sudo: true + become: true shell: docker rm -v {{user_sessions.service_name_short}} diff --git a/ansible/playbooks/de-services-common.yml b/ansible/playbooks/de-services-common.yml index a8e3d5ece..a9c9445c4 100644 --- a/ansible/playbooks/de-services-common.yml +++ b/ansible/playbooks/de-services-common.yml @@ -1,7 +1,7 @@ --- - name: Perform common systems configuration hosts: systems - sudo: yes + become: yes gather_facts: false tags: - config diff --git a/ansible/playbooks/de-start-containers.yml b/ansible/playbooks/de-start-containers.yml index 295de6a0e..f97a1e856 100644 --- a/ansible/playbooks/de-start-containers.yml +++ b/ansible/playbooks/de-start-containers.yml @@ -1,7 +1,7 @@ --- - name: Start anon-files hosts: anon-files:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -13,7 +13,7 @@ - name: Start apps hosts: apps:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -25,7 +25,7 @@ - name: Start clockwork hosts: clockwork:&systems - sudo: true + become: true 
gather_facts: false tags: - services @@ -37,7 +37,7 @@ - name: Start clm hosts: condor-log-monitor:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -49,7 +49,7 @@ - name: Start data-info hosts: data-info:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -61,7 +61,7 @@ - name: Start dewey hosts: dewey:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -74,7 +74,7 @@ - name: Start de-ui hosts: ui:&systems - sudo: true + become: true gather_facts: false tags: - ui @@ -85,7 +85,7 @@ - name: Start de-ui-nginx hosts: ui:&systems - sudo: true + become: true gather_facts: false tags: - ui @@ -97,7 +97,7 @@ - name: Start exim-sender hosts: exim-sender:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -109,7 +109,7 @@ - name: Start info-typer hosts: info-typer:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -122,7 +122,7 @@ - name: Start infosquito hosts: infosquito:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -135,7 +135,7 @@ - name: Start iplant-email hosts: iplant-email:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -147,7 +147,7 @@ - name: Start iplant-groups hosts: iplant-groups:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -159,7 +159,7 @@ - name: Start jex-events hosts: jexevents:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -171,7 +171,7 @@ - name: Start kifshare hosts: kifshare:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -183,7 +183,7 @@ - name: Start metadata hosts: metadata:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -195,7 +195,7 @@ - name: Start monkey hosts: monkey:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -208,7 +208,7 @@ - name: Start notification-agent hosts: notificationagent:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -220,7 +220,7 @@ - name: Start saved-searches hosts: saved-searches:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -232,7 +232,7 @@ - name: Start terrain hosts: terrain:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -244,7 +244,7 @@ - name: Start tree-urls hosts: tree-urls:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -256,7 +256,7 @@ - name: Start user-preferences hosts: user-preferences:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -268,7 +268,7 @@ - name: Start user-sessions hosts: user-sessions:&systems - sudo: true + become: true gather_facts: false tags: - services diff --git a/ansible/playbooks/de-stop-containers.yml b/ansible/playbooks/de-stop-containers.yml index cb32ecc18..799680adf 100644 --- a/ansible/playbooks/de-stop-containers.yml +++ b/ansible/playbooks/de-stop-containers.yml @@ -1,7 +1,7 @@ --- - name: Stop anon-files hosts: anon-files:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -14,7 +14,7 @@ - name: Stop apps hosts: apps:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -27,7 +27,7 @@ - name: Stop clockwork hosts: clockwork:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -40,7 +40,7 @@ - name: Stop clm hosts: condor-log-monitor:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -53,7 +53,7 @@ - name: Stop data-info hosts: 
data-info:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -66,7 +66,7 @@ - name: Stop dewey hosts: dewey:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -79,7 +79,7 @@ - name: Stop de-ui hosts: ui:&systems - sudo: true + become: true gather_facts: false tags: - ui @@ -91,7 +91,7 @@ - name: Stop de-ui-nginx hosts: ui:&systems - sudo: true + become: true gather_facts: false tags: - ui @@ -104,7 +104,7 @@ - name: Stop exim-sender hosts: exim-sender:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -117,7 +117,7 @@ - name: Stop info-typer hosts: info-typer:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -130,7 +130,7 @@ - name: Stop infosquito hosts: infosquito:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -143,7 +143,7 @@ - name: Stop iplant-email hosts: iplant-email:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -156,7 +156,7 @@ - name: Stop iplant-groups hosts: iplant-groups:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -169,7 +169,7 @@ - name: Stop jex-events hosts: jexevents:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -182,7 +182,7 @@ - name: Stop kifshare hosts: kifshare:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -195,7 +195,7 @@ - name: Stop metadata hosts: metadata:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -208,7 +208,7 @@ - name: Stop monkey hosts: monkey:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -221,7 +221,7 @@ - name: Stop notification-agent hosts: notificationagent:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -234,7 +234,7 @@ - name: Stop saved-searches hosts: saved-searches:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -247,7 +247,7 @@ - name: Stop terrain hosts: terrain:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -260,7 +260,7 @@ - name: Stop tree-urls hosts: tree-urls:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -273,7 +273,7 @@ - name: Stop user-preferences hosts: user-preferences:&systems - sudo: true + become: true gather_facts: false tags: - services @@ -286,7 +286,7 @@ - name: Stop user-sessions hosts: user-sessions:&systems - sudo: true + become: true gather_facts: false tags: - services From 9450e94f95b5923045a4f7f5833741f915cbe80c Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Wed, 2 Mar 2016 10:23:38 -0700 Subject: [PATCH 077/183] CORE-7445 fix for large file upload failure with nginx throwing 413. 
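The failure mode here is that nginx rejects an oversized POST before it ever reaches the upload
service and answers 413 with a non-JSON error page, which the old client code fed straight into
the JSON splitter. This patch reorders the client-side checks: empty response first, then tag
stripping and JSON parsing, and only then the "file" field. A minimal sketch of the same
validation cascade, written in Go for brevity (the helper name and map-based payload are
illustrative only; the real fix is the GWT Java below):

    package uploadcheck

    import (
        "encoding/json"
        "errors"
        "strings"
    )

    // parseUploadResult mirrors the check order in onSubmitComplete below:
    // an empty body (what a 413 error page can reduce to) fails first, then
    // a body that is not JSON, then JSON that lacks a "file" entry.
    func parseUploadResult(body string) (map[string]interface{}, error) {
        if strings.TrimSpace(body) == "" {
            return nil, errors.New("upload failed: empty response")
        }
        var payload map[string]interface{}
        if err := json.Unmarshal([]byte(body), &payload); err != nil {
            return nil, errors.New("upload failed: response is not JSON")
        }
        if _, ok := payload["file"]; !ok {
            return nil, errors.New("upload failed: no \"file\" entry in response")
        }
        return payload, nil
    }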
--- .../views/dialogs/SimpleFileUploadDialog.java | 24 ++++++++++++------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java index 85a4ac9bf..227718ad6 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java @@ -10,10 +10,8 @@ import org.iplantc.de.commons.client.info.IplantAnnouncer; import org.iplantc.de.commons.client.validators.DiskResourceNameValidator; import org.iplantc.de.commons.client.views.dialogs.IPlantDialog; -import org.iplantc.de.commons.client.views.dialogs.IplantErrorDialog; import org.iplantc.de.commons.client.widgets.IPCFileUploadField; import org.iplantc.de.diskResource.client.events.FileUploadedEvent; -import org.iplantc.de.resources.client.messages.I18N; import com.google.common.base.Strings; import com.google.common.collect.Lists; @@ -251,22 +249,30 @@ void onSubmitComplete(SubmitCompleteEvent event) { statList.get(formList.indexOf(event.getSource())).clearStatus(""); } - String results2 = event.getResults(); - String results = Format.stripTags(results2); - Splittable split = StringQuoter.split(results); IPCFileUploadField field = fufList.get(formList.indexOf(event.getSource())); - if (split == null) { + String results2 = event.getResults(); + + if (Strings.isNullOrEmpty(results2)) { IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList( field.getValue())))); } else { - if (split.isUndefined("file") || (split.get("file") == null)) { - field.markInvalid(appearance.fileUploadsFailed(Lists.newArrayList(field.getValue()))); + String results = Format.stripTags(results2); + Splittable split = StringQuoter.split(results); + + if (split == null) { IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList( field.getValue())))); } else { - eventBus.fireEvent(new FileUploadedEvent(uploadDest, field.getValue(), results)); + if (split.isUndefined("file") || (split.get("file") == null)) { + field.markInvalid(appearance.fileUploadsFailed(Lists.newArrayList(field.getValue()))); + IplantAnnouncer.getInstance() + .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed( + Lists.newArrayList(field.getValue())))); + } else { + eventBus.fireEvent(new FileUploadedEvent(uploadDest, field.getValue(), results)); + } } } From 6efe5a3d1aed083e3ce343a3cae78e98ea9b6753 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 2 Mar 2016 14:58:34 -0700 Subject: [PATCH 078/183] CORE-7499: Add has_child queries for file and folder (template) metadata --- .../services/impl/DataSearchQueryBuilder.java | 56 +++++++++++++++---- .../impl/DataSearchQueryBuilderTest.java | 10 +++- 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilder.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilder.java index b79dba54f..213a11a65 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilder.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilder.java @@ -43,6 +43,7 @@ public class DataSearchQueryBuilder { public static 
final String PATH = "path"; public static final String METADATA2 = "metadata"; public static final String NESTED2 = "nested"; + public static final String HAS_CHILD = "has_child"; public static final String FILE_SIZE = "fileSize"; public static final String LABEL = "label"; public static final String DATE_CREATED = "dateCreated"; @@ -188,19 +189,44 @@ public String getQuery() { return toString(); } + private Splittable metadataNested(String content, String field) { + // {"nested":{"path":"metadata","query":{"query_string":{"query":"*ipc* OR *attrib*","fields":["metadata.attribute"]}}}} + Splittable metadata = StringQuoter.createSplittable(); + + Splittable nested = addChild(metadata, NESTED2); + StringQuoter.create(METADATA2).assign(nested, PATH); + + getSimpleQuery(field, content).assign(nested, QUERY2); + return metadata; + } + + private Splittable childQuery(String type, Splittable innerQuery) { + // {"has_child": {"type": "...", "score_mode": "max", "query": {...}} + Splittable query = StringQuoter.createSplittable(); + Splittable hasChild = addChild(query, HAS_CHILD); + StringQuoter.create(type).assign(hasChild, "type"); + StringQuoter.create("max").assign(hasChild, "score_mode"); + innerQuery.assign(hasChild, QUERY2); + return query; + } + public DataSearchQueryBuilder metadataAttribute() { String content = dsf.getMetadataAttributeQuery(); if (!Strings.isNullOrEmpty(content)) { + Splittable attr = StringQuoter.createSplittable(); + Splittable attrBool = addChild(attr, BOOL); + Splittable attrShouldList = addArray(attrBool, "should"); - // {"nested":{"path":"metadata","query":{"query_string":{"query":"*ipc* OR *attrib*","fields":["metadata.attribute"]}}}} - Splittable metadata = StringQuoter.createSplittable(); + Splittable metadata = metadataNested(content, METADATA_ATTRIBUTE); + appendArrayItem(attrShouldList, metadata); - Splittable nested = addChild(metadata, NESTED2); - StringQuoter.create(METADATA2).assign(nested, PATH); + Splittable childFileQuery = childQuery("file_metadata", metadata.deepCopy()); + appendArrayItem(attrShouldList, childFileQuery); - getSimpleQuery(METADATA_ATTRIBUTE, content).assign(nested, QUERY2); + Splittable childFolderQuery = childQuery("folder_metadata", metadata.deepCopy()); + appendArrayItem(attrShouldList, childFolderQuery); - appendArrayItem(mustList, metadata); + appendArrayItem(mustList, attr); } return this; } @@ -208,14 +234,20 @@ public DataSearchQueryBuilder metadataAttribute() { public DataSearchQueryBuilder metadataValue() { String content = dsf.getMetadataValueQuery(); if (!Strings.isNullOrEmpty(content)) { - // {"nested":{"path":"metadata","query":{"query_string":{"query":"*ipc* OR *attrib*","fields":["metadata.value"]}}}} - Splittable metadata = StringQuoter.createSplittable(); + Splittable value = StringQuoter.createSplittable(); + Splittable valueBool = addChild(value, BOOL); + Splittable valueShouldList = addArray(valueBool, "should"); + + Splittable metadata = metadataNested(content, METADATA_VALUE); + appendArrayItem(valueShouldList, metadata); + + Splittable childFileQuery = childQuery("file_metadata", metadata.deepCopy()); + appendArrayItem(valueShouldList, childFileQuery); - Splittable nested = addChild(metadata, NESTED2); - StringQuoter.create(METADATA2).assign(nested, PATH); - getSimpleQuery(METADATA_VALUE, content).assign(nested, QUERY2); + Splittable childFolderQuery = childQuery("folder_metadata", metadata.deepCopy()); + appendArrayItem(valueShouldList, childFolderQuery); - appendArrayItem(mustList, metadata); + 
appendArrayItem(mustList, value); } return this; } diff --git a/ui/de-lib/src/test/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilderTest.java b/ui/de-lib/src/test/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilderTest.java index 8929c8310..ab3ce2a98 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilderTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilderTest.java @@ -355,9 +355,12 @@ private String setFileSizeRange(final Double min, final Double max) { private String setMetadataAttributeQuery(final String givenQuery) { when(dsf.getMetadataAttributeQuery()).thenReturn(givenQuery); DataSearchQueryBuilder uut = new DataSearchQueryBuilder(dsf, userInfoMock); - return "{\"nested\":{\"query\":" + String nestedQuery = "{\"nested\":{\"query\":" + uut.getSimpleQuery(DataSearchQueryBuilder.METADATA_ATTRIBUTE, givenQuery).getPayload() + ",\"path\":\"metadata\"}}"; + String fileQuery = "{\"has_child\":{\"query\":"+nestedQuery+",\"score_mode\":\"max\",\"type\":\"file_metadata\"}}"; + String folderQuery = "{\"has_child\":{\"query\":"+nestedQuery+",\"score_mode\":\"max\",\"type\":\"folder_metadata\"}}"; + return "{\"bool\":{\"should\":[" + nestedQuery + "," + fileQuery + "," + folderQuery + "]}}"; } /** @@ -366,9 +369,12 @@ private String setMetadataAttributeQuery(final String givenQuery) { private String setMetadataValueQuery(final String givenQuery) { when(dsf.getMetadataValueQuery()).thenReturn(givenQuery); DataSearchQueryBuilder uut = new DataSearchQueryBuilder(dsf, userInfoMock); - return "{\"nested\":{\"query\":" + String nestedQuery = "{\"nested\":{\"query\":" + uut.getSimpleQuery(DataSearchQueryBuilder.METADATA_VALUE, givenQuery).getPayload() + ",\"path\":\"metadata\"}}"; + String fileQuery = "{\"has_child\":{\"query\":"+nestedQuery+",\"score_mode\":\"max\",\"type\":\"file_metadata\"}}"; + String folderQuery = "{\"has_child\":{\"query\":"+nestedQuery+",\"score_mode\":\"max\",\"type\":\"folder_metadata\"}}"; + return "{\"bool\":{\"should\":[" + nestedQuery + "," + fileQuery + "," + folderQuery + "]}}"; } /** From 60ed2a4f57a6ebd1d0e74f6c03cb559d5decac07 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Wed, 2 Mar 2016 15:11:15 -0700 Subject: [PATCH 079/183] CORE-7445 fix for large file upload failure with nginx throwing 413. Second attempt. 
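This attempt also adds a server-side guard: the upload controller now inspects the status code
returned by the upstream POST and raises an error on 413 instead of relaying nginx's HTML error
page as if it were an upload result. A rough sketch of that guard, in Go for brevity (names are
illustrative; the real implementation is the Spring controller below):

    package relay

    import (
        "fmt"
        "net/http"
    )

    // relayUpload forwards an upload request and fails fast when the
    // upstream answers 413, rather than passing the error page through.
    func relayUpload(client *http.Client, req *http.Request) (*http.Response, error) {
        resp, err := client.Do(req)
        if err != nil {
            return nil, err
        }
        if resp.StatusCode == http.StatusRequestEntityTooLarge { // 413
            resp.Body.Close()
            return nil, fmt.Errorf("file too large to upload")
        }
        return resp, nil
    }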
--- .../views/dialogs/SimpleFileUploadDialog.java | 21 +++++++++++++---
 .../file/SecuredFileUploadController.java     | 25 +++++++++++--------
 2 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java
index 227718ad6..91ed3da3a 100644
--- a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java
+++ b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java
@@ -251,7 +251,7 @@ void onSubmitComplete(SubmitCompleteEvent event) {
         IPCFileUploadField field = fufList.get(formList.indexOf(event.getSource()));
         String results2 = event.getResults();
-
+        GWT.log("upload result->" + results2);
         if (Strings.isNullOrEmpty(results2)) {
             IplantAnnouncer.getInstance()
                            .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList(
@@ -350,7 +350,7 @@ public void markDuplicates(Collection duplicates) {
                 destResourceMap.get(id).markInvalid(appearance.fileExist());
             }
         } else {
-            for (IPCFileUploadField field : destResourceMap.values()) {
+            for (final IPCFileUploadField field : destResourceMap.values()) {
                 int index = fufList.indexOf(field);
                 statList.get(index).setBusy("");
                 FormPanel form = formList.get(index);
@@ -358,10 +358,23 @@
                     @Override
                     public void onSubmit(SubmitEvent event) {
+                        if (event.isCanceled()) {
+                            IplantAnnouncer.getInstance()
+                                           .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(
+                                                   Lists.newArrayList(field.getValue()))));
+                        }
+
                         getOkButton().disable();
-                    }
+                    }
                 });
-                form.submit();
+                try {
+                    form.submit();
+                } catch (Exception e) {
+                    GWT.log("exception on submit: " + e.getMessage());
+                    IplantAnnouncer.getInstance()
+                                   .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(
+                                           Lists.newArrayList(field.getValue()))));
+                }
                 submittedForms.add(form);
             }
         }
diff --git a/ui/de-webapp/src/main/java/org/iplantc/de/server/controllers/file/SecuredFileUploadController.java b/ui/de-webapp/src/main/java/org/iplantc/de/server/controllers/file/SecuredFileUploadController.java
index 2be97bd6a..31a5a8a38 100644
--- a/ui/de-webapp/src/main/java/org/iplantc/de/server/controllers/file/SecuredFileUploadController.java
+++ b/ui/de-webapp/src/main/java/org/iplantc/de/server/controllers/file/SecuredFileUploadController.java
@@ -1,5 +1,14 @@
 package org.iplantc.de.server.controllers.file;
 
+import static org.iplantc.de.server.AppLoggerConstants.API_METRICS_LOGGER;
+import static org.iplantc.de.server.AppLoggerConstants.REQUEST_KEY;
+import static org.iplantc.de.server.AppLoggerConstants.RESPONSE_KEY;
+
+import org.iplantc.de.server.AppLoggerUtil;
+import org.iplantc.de.server.auth.DESecurityConstants;
+import org.iplantc.de.server.auth.JwtBuilder;
+import org.iplantc.de.shared.services.BaseServiceCallWrapper;
+
 import org.apache.commons.io.IOUtils;
 import org.apache.http.HttpEntity;
 import org.apache.http.client.methods.CloseableHttpResponse;
@@ -9,10 +18,6 @@
 import org.apache.http.entity.mime.MultipartEntityBuilder;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClients;
-import org.iplantc.de.server.AppLoggerUtil;
-import org.iplantc.de.server.auth.DESecurityConstants;
-import org.iplantc.de.server.auth.JwtBuilder;
-import org.iplantc.de.shared.services.BaseServiceCallWrapper;
 import
org.jose4j.lang.JoseException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -27,16 +32,12 @@ import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.multipart.MultipartFile; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; - -import static org.iplantc.de.server.AppLoggerConstants.API_METRICS_LOGGER; -import static org.iplantc.de.server.AppLoggerConstants.REQUEST_KEY; -import static org.iplantc.de.server.AppLoggerConstants.RESPONSE_KEY; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; /** * Performs secured file uploads. @@ -47,6 +48,7 @@ @Controller public class SecuredFileUploadController { + public static final int ENTITY_TOO_LARGE = 413; private final Logger API_REQUEST_LOG = LoggerFactory.getLogger(API_METRICS_LOGGER); private final AppLoggerUtil loggerUtil = AppLoggerUtil.getInstance(); @@ -82,6 +84,9 @@ public ResponseEntity doSecureFileUpload(@RequestParam("dest") final Str try { final long requestStartTime = System.currentTimeMillis(); final CloseableHttpResponse incomingResponse = loggerUtil.copyRequestIdHeader(post, client.execute(post)); + if(incomingResponse.getStatusLine().getStatusCode() == ENTITY_TOO_LARGE) { + throw new Exception("File too large to upload!"); + } final long responseRecvTime = System.currentTimeMillis(); final String responseJson = loggerUtil.createMdcResponseMapJson(incomingResponse, BaseServiceCallWrapper.Type.GET, From b259850a8651922eba181594052d4858d324a795 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Thu, 3 Mar 2016 10:39:23 -0700 Subject: [PATCH 080/183] CORE-7542: modified sharkbait to add analysis permissions to Grouper --- tools/sharkbait/src/sharkbait/analyses.clj | 38 ++++++++++++++++++++++ tools/sharkbait/src/sharkbait/core.clj | 10 +++++- tools/sharkbait/src/sharkbait/db.clj | 13 ++++++++ 3 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 tools/sharkbait/src/sharkbait/analyses.clj diff --git a/tools/sharkbait/src/sharkbait/analyses.clj b/tools/sharkbait/src/sharkbait/analyses.clj new file mode 100644 index 000000000..5111daa2c --- /dev/null +++ b/tools/sharkbait/src/sharkbait/analyses.clj @@ -0,0 +1,38 @@ +(ns sharkbait.analyses + (:require [clojure.string :as string] + [sharkbait.consts :as consts] + [sharkbait.db :as db] + [sharkbait.members :as members] + [sharkbait.permissions :as perms] + [sharkbait.roles :as roles])) + +(defn- find-analysis-owner-member + "Searches for a membership for the user who submitted an analysis." + [session subjects {:keys [username]}] + (when-let [subject (subjects (string/replace username #"@iplantcollaborative.org$" ""))] + (members/find-subject-member session subject true))) + +(defn- grant-owner-permission + "Grants ownership permission to an analysis." 
+ [session subjects de-users-role app-resource analysis] + (when-let [member (find-analysis-owner-member session subjects analysis)] + (perms/grant-role-membership-permission de-users-role member perms/own app-resource))) + +(defn- register-analysis + [session subjects de-users-role permission-def folder-name analysis] + (let [analysis-resource (perms/create-permission-resource session permission-def folder-name (:id analysis))] + (grant-owner-permission session subjects de-users-role analysis-resource analysis))) + +(defn register-de-analyses + [db-spec session subjects folder-names permission-def-name] + (let [analyses-folder-name (:de-analyses folder-names) + permission-def (perms/find-permission-def analyses-folder-name permission-def-name) + pre-existing-analyses (perms/load-permission-resource-name-set permission-def) + de-users-role-name (format "%s:%s" (:de-users folder-names) consts/de-users-role-name) + de-users-role (roles/find-role session de-users-role-name)] + (->> (db/list-de-analyses db-spec) + (remove (comp (partial contains? pre-existing-analyses) + (partial str analyses-folder-name ":") + :id)) + (map (partial register-analysis session subjects de-users-role permission-def analyses-folder-name)) + (dorun)))) diff --git a/tools/sharkbait/src/sharkbait/core.clj b/tools/sharkbait/src/sharkbait/core.clj index a19bb4a6a..918c56594 100644 --- a/tools/sharkbait/src/sharkbait/core.clj +++ b/tools/sharkbait/src/sharkbait/core.clj @@ -3,6 +3,7 @@ (:require [clojure.java.jdbc :as jdbc] [clojure.string :as string] [common-cli.core :as cli] + [sharkbait.analyses :as analyses] [sharkbait.apps :as apps] [sharkbait.consts :as consts] [sharkbait.db :as db] @@ -63,12 +64,19 @@ (let [subjects (into {} (map (juxt #(.getId %) identity) subjects))] (apps/register-de-apps db-spec session subjects folder-names consts/app-permission-def-name))) +(defn- register-de-analyses + [db-spec folder-names session subjects] + (println "Registering DE analyses...") + (let [subjects (into {} (map (juxt #(.getId %) identity) subjects))] + (analyses/register-de-analyses db-spec session subjects folder-names consts/analysis-permission-def-name))) + (defn- register-de-entities "Registers DE entities in Grouper." [db-spec folder-names session subjects] (time (register-de-users session folder-names subjects)) (time (create-permission-defs session folder-names)) - (time (register-de-apps db-spec folder-names session subjects))) + (time (register-de-apps db-spec folder-names session subjects)) + (time (register-de-analyses db-spec folder-names session subjects))) (defn- perform-de-user-actions "Performs the actions that do not require superuser privileges." diff --git a/tools/sharkbait/src/sharkbait/db.clj b/tools/sharkbait/src/sharkbait/db.clj index 1716d1984..1e71734a0 100644 --- a/tools/sharkbait/src/sharkbait/db.clj +++ b/tools/sharkbait/src/sharkbait/db.clj @@ -50,6 +50,19 @@ [db-spec] (jdbc/query db-spec (list-de-apps-query))) +(defn- list-de-analyses-query + "Formats an SQL query to list all of the analyses in the DE database." + [] + (-> (select :j.id :u.username) + (from [:jobs :j]) + (join [:users :u] [:= :j.user_id :u.id]) + sql/format)) + +(defn list-de-analyses + "Lists all of the analyses in the DE." + [db-spec] + (jdbc/query db-spec (list-de-analyses-query))) + (defn- list-de-users-query "Formats an SQL query to list all users in the DE database." 
[] From c123a9f954e728b80fe847b5f6fccc5eb500e702 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 3 Mar 2016 12:11:14 -0700 Subject: [PATCH 081/183] Add lb/qos calls to dewey/info-typer; standardize logging some. --- services/dewey/src/dewey/amq.clj | 1 + services/dewey/src/dewey/curation.clj | 2 +- services/info-typer/src/info_typer/amqp.clj | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/services/dewey/src/dewey/amq.clj b/services/dewey/src/dewey/amq.clj index 82df686bb..83918b70a 100644 --- a/services/dewey/src/dewey/amq.clj +++ b/services/dewey/src/dewey/amq.clj @@ -39,6 +39,7 @@ exchange-auto-delete topics delivery-fn)))] + (lb/qos channel 100) (le/topic channel exchange-name :durable exchange-durable :auto-delete exchange-auto-delete) (lq/declare channel queue :durable true) (doseq [topic topics] (lq/bind channel queue exchange-name :routing-key topic)) diff --git a/services/dewey/src/dewey/curation.clj b/services/dewey/src/dewey/curation.clj index ff139570e..803e0b553 100644 --- a/services/dewey/src/dewey/curation.clj +++ b/services/dewey/src/dewey/curation.clj @@ -309,7 +309,7 @@ Throws: It throws any exception perculating up from below." [irods-cfg es routing-key msg] - (log/debug "received message: routing key =" routing-key ", message =" msg) + (log/info (format "[curation/consume-msg] [%s] [%s]" routing-key (String. msg "UTF-8"))) (if-let [consume (resolve-consumer routing-key)] (try+ (irods/with-jargon irods-cfg [irods] diff --git a/services/info-typer/src/info_typer/amqp.clj b/services/info-typer/src/info_typer/amqp.clj index 6a7146e42..da6dc63fb 100644 --- a/services/info-typer/src/info_typer/amqp.clj +++ b/services/info-typer/src/info_typer/amqp.clj @@ -1,6 +1,7 @@ (ns info-typer.amqp (:require [clojure.tools.logging :as log] [langohr.core :as rmq] + [langohr.basic :as lb] [langohr.channel :as lch] [langohr.exchange :as le] [langohr.queue :as lq] @@ -85,6 +86,7 @@ (log/info "configuring AMQP connection") (let [chan (lch/open (get-connection (connection-map))) q (declare-queue chan (str "info-typer." (cfg/environment-name)))] + (lb/qos chan 100) (declare-exchange chan (cfg/amqp-exchange) (cfg/amqp-exchange-type) :durable (cfg/amqp-exchange-durable?) :auto-delete (cfg/amqp-exchange-auto-delete?)) (doseq [topic topics] (bind chan q (cfg/amqp-exchange) topic)) From 96316729f58bb047bce29c29931bbf68f43d3ebf Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 11 Feb 2016 10:44:55 -0700 Subject: [PATCH 082/183] CORE-7496, CORE-7497: create a service for indexing template metadata into elasticsearch Much of the basic structure of this is copied from jobservices (in jex-go branch) or from jex-events: * configurate, logcabin packages * flag parsing, version handling * mode-setting logic * basic docker interaction At present, this service only operates in one of the three documented modes, --mode full. This mode indexes the metadata database's 'avus' table fully once and does not listen on AMQP. Vendored dependencies are in a separate commit for readability. 
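For orientation, here is what --mode full amounts to with the packages this commit adds, as a
condensed sketch built only from the APIs in the diff below (main.go does the same wiring, with
logging and flag handling on top):

    package main

    import (
        "configurate"

        "templeton/database"
        "templeton/elasticsearch"
    )

    // runFullIndex loads the YAML config, connects to the metadata
    // database and to Elasticsearch, and reindexes everything once.
    func runFullIndex(cfgPath string) error {
        if err := configurate.Init(cfgPath); err != nil {
            return err
        }
        dbURI, err := configurate.C.String("db.uri")
        if err != nil {
            return err
        }
        esBase, err := configurate.C.String("elasticsearch.base")
        if err != nil {
            return err
        }
        esIndex, err := configurate.C.String("elasticsearch.index")
        if err != nil {
            return err
        }
        d, err := database.NewDatabaser(dbURI)
        if err != nil {
            return err
        }
        es, err := elasticsearch.NewElasticer(esBase, esIndex)
        if err != nil {
            return err
        }
        defer es.Close()
        return es.IndexEverything(d)
    }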
--- services/templeton/.gitignore                |   4 +
 services/templeton/Dockerfile                 |  15 ++
 services/templeton/README.md                  |  20 +++
 services/templeton/docker-build.sh            |  31 ++++
 .../templeton/src/configurate/configurate.go  |  27 +++
 .../src/configurate/configurate_test.go       |  87 +++++++++
 services/templeton/src/logcabin/logcabin.go   |  76 ++++++++
 .../templeton/src/logcabin/logcabin_test.go   |  69 +++++++
 .../src/templeton/database/database.go        | 169 ++++++++++++++++++
 .../templeton/elasticsearch/elasticsearch.go  |  86 +++++++++
 services/templeton/src/templeton/main.go      | 135 ++++++++++++++
 .../templeton/src/templeton/model/model.go    |  50 ++++++
 services/templeton/src/test/test_config.yaml  |   9 +
 services/templeton/version                    |   1 +
 14 files changed, 779 insertions(+)
 create mode 100644 services/templeton/.gitignore
 create mode 100644 services/templeton/Dockerfile
 create mode 100644 services/templeton/README.md
 create mode 100755 services/templeton/docker-build.sh
 create mode 100644 services/templeton/src/configurate/configurate.go
 create mode 100644 services/templeton/src/configurate/configurate_test.go
 create mode 100644 services/templeton/src/logcabin/logcabin.go
 create mode 100644 services/templeton/src/logcabin/logcabin_test.go
 create mode 100644 services/templeton/src/templeton/database/database.go
 create mode 100644 services/templeton/src/templeton/elasticsearch/elasticsearch.go
 create mode 100644 services/templeton/src/templeton/main.go
 create mode 100644 services/templeton/src/templeton/model/model.go
 create mode 100644 services/templeton/src/test/test_config.yaml
 create mode 100644 services/templeton/version

diff --git a/services/templeton/.gitignore b/services/templeton/.gitignore
new file mode 100644
index 000000000..d460b3197
--- /dev/null
+++ b/services/templeton/.gitignore
@@ -0,0 +1,4 @@
+bin/
+pkg/
+.hg/
+.git/
diff --git a/services/templeton/Dockerfile b/services/templeton/Dockerfile
new file mode 100644
index 000000000..0c9b0ee93
--- /dev/null
+++ b/services/templeton/Dockerfile
@@ -0,0 +1,15 @@
+FROM jeanblanchard/alpine-glibc
+
+ADD bin/templeton /bin/
+
+ARG git_commit=unknown
+ARG buildenv_git_commit=unknown
+ARG version=unknown
+LABEL org.iplantc.de.templeton.git-ref="$git_commit" \
+      org.iplantc.de.templeton.version="$version" \
+      org.iplantc.de.buildenv.git-ref="$buildenv_git_commit"
+
+EXPOSE 60000
+ENTRYPOINT ["templeton"]
+CMD ["--help"]
+
diff --git a/services/templeton/README.md b/services/templeton/README.md
new file mode 100644
index 000000000..eac03c487
--- /dev/null
+++ b/services/templeton/README.md
@@ -0,0 +1,20 @@
+templeton
+=========
+
+Templeton is a service which indexes template metadata, as stored in the
+`metadata` database and interacted with ordinarily through the `metadata`
+service.
+
+Templeton operates in three modes, controlled by the `--mode` command-line flag:
+
+| Mode          | Operation |
+| ------------- | --------- |
+| `periodic`    | listens on a configured AMQP queue and receives only `index.templates` and `index.all` messages to trigger complete reindexes of the metadata template information (similar to `Infosquito` and `monkey`) |
+| `incremental` | listens on a configured AMQP queue and receives messages describing incremental changes to be indexed (similar to `dewey`) |
+| `full`        | completely reindexes the metadata template information once and then exits |
+
+It expects a configuration file in YAML format passed by the `--config`
In all modes it requires configuration for elasticsearch and +postgresql; in periodic and incremental modes it requires AMQP configuration. + +An example configuration can be found at `src/test/test_config.yaml`. diff --git a/services/templeton/docker-build.sh b/services/templeton/docker-build.sh new file mode 100755 index 000000000..f71384aaf --- /dev/null +++ b/services/templeton/docker-build.sh @@ -0,0 +1,31 @@ +#!/bin/sh +set -x +set -e + +if [ -z "$DOCKER_USER" ]; then + DOCKER_USER=discoenv +fi + +if [ -z "$DOCKER_REPO" ]; then + DOCKER_REPO=templeton +fi + +VERSION=$(cat version | sed -e 's/^ *//' -e 's/ *$//') +GIT_COMMIT=$(git rev-parse HEAD) +BUILD_USER=$(whoami) + +docker pull $DOCKER_USER/buildenv:latest + +BUILDENV_GIT_COMMIT=$(docker inspect -f '{{ (index .Config.Labels "org.iplantc.de.buildenv.git-ref")}}' $DOCKER_USER/buildenv:latest) + +docker run --rm \ + -v $(pwd):/build \ + -w /build \ + $DOCKER_USER/buildenv:latest \ + gb build -f -F --ldflags "-X main.appver=$VERSION -X main.gitref=$GIT_COMMIT -X main.builtby=$BUILD_USER" +docker build --build-arg git_commit=$GIT_COMMIT \ + --build-arg buildenv_git_commit=$BUILDENV_GIT_COMMIT \ + --build-arg version=$VERSION \ + --pull --rm -t "$DOCKER_USER/$DOCKER_REPO:dev" . +docker push $DOCKER_USER/$DOCKER_REPO:dev +docker rmi $DOCKER_USER/$DOCKER_REPO:dev diff --git a/services/templeton/src/configurate/configurate.go b/services/templeton/src/configurate/configurate.go new file mode 100644 index 000000000..35b2cd072 --- /dev/null +++ b/services/templeton/src/configurate/configurate.go @@ -0,0 +1,27 @@ +package configurate + +import ( + "io/ioutil" + "os" + + "github.com/olebedev/config" +) + +var ( + //C is a global *config.Config + C *config.Config +) + +// Init initializes the underlying config. 
+func Init(path string) error { + f, err := os.Open(path) + if err != nil { + return err + } + contents, err := ioutil.ReadAll(f) + if err != nil { + return err + } + C, err = config.ParseYaml(string(contents)) + return err +} diff --git a/services/templeton/src/configurate/configurate_test.go b/services/templeton/src/configurate/configurate_test.go new file mode 100644 index 000000000..d6bf1e203 --- /dev/null +++ b/services/templeton/src/configurate/configurate_test.go @@ -0,0 +1,87 @@ +package configurate + +import "testing" + +func configurator() error { + path := "../test/test_config.yaml" + return Init(path) +} + +func TestNew(t *testing.T) { + err := configurator() + if err != nil { + t.Error(err) + t.Fail() + } + if C == nil { + t.Errorf("configurate.New() returned nil") + } +} + +func TestAMQPConfig(t *testing.T) { + err := configurator() + if err != nil { + t.Error(err) + t.Fail() + } + actual, err := C.String("amqp.uri") + if err != nil { + t.Error(err) + t.Fail() + } + expected := "amqp://guest:guest@192.168.99.100:5672/" + if actual != expected { + t.Errorf("The amqp.uri was %s instead of %s", actual, expected) + } +} + +func TestDBConfig(t *testing.T) { + err := configurator() + if err != nil { + t.Error(err) + t.Fail() + } + actual, err := C.String("db.uri") + if err != nil { + t.Error(err) + t.Fail() + } + expected := "postgres://de:notprod@192.168.99.100:5432/metadata?sslmode=disable" + if actual != expected { + t.Errorf("The db.uri was %s instead of %s", actual, expected) + } +} + +func TestESBase(t *testing.T) { + err := configurator() + if err != nil { + t.Error(err) + t.Fail() + } + actual, err := C.String("elasticsearch.base") + if err != nil { + t.Error(err) + t.Fail() + } + expected := "http://localhost:9200" + if actual != expected { + t.Errorf("The elasticsearch.base was %s instead of %s", actual, expected) + } +} + +func TestESIndex(t *testing.T) { + err := configurator() + if err != nil { + t.Error(err) + t.Fail() + } + actual, err := C.String("elasticsearch.index") + if err != nil { + t.Error(err) + t.Fail() + } + expected := "data" + if actual != expected { + t.Errorf("The elasticsearch.index was %s instead of %s", actual, expected) + } +} diff --git a/services/templeton/src/logcabin/logcabin.go b/services/templeton/src/logcabin/logcabin.go new file mode 100644 index 000000000..ce0b3eb44 --- /dev/null +++ b/services/templeton/src/logcabin/logcabin.go @@ -0,0 +1,76 @@ +package logcabin + +import ( + "encoding/json" + "log" + "os" + "time" +) + +// LoggerFunc adapts a function so it can be used as an io.Writer. +type LoggerFunc func([]byte) (int, error) + +func (l LoggerFunc) Write(logbuf []byte) (n int, err error) { + return l(logbuf) +} + +// LogMessage represents a message that will be logged in JSON format. +type LogMessage struct { + Service string `json:"service"` + Artifact string `json:"art-id"` + Group string `json:"group-id"` + Level string `json:"level"` + Time int64 `json:"timeMillis"` + Message string `json:"message"` +} + +// NewLogMessage returns a pointer to a new instance of LogMessage. +func NewLogMessage(message string) *LogMessage { + lm := &LogMessage{ + Service: "templeton", + Artifact: "templeton", + Group: "org.iplantc", + Level: "INFO", + Time: time.Now().UnixNano() / int64(time.Millisecond), + Message: message, + } + return lm +} + +// LogWriter writes to stdout with a custom timestamp. 
+func LogWriter(logbuf []byte) (n int, err error) {
+	m := NewLogMessage(string(logbuf[:]))
+	j, err := json.Marshal(m)
+	if err != nil {
+		return 0, err
+	}
+	j = append(j, []byte("\n")...)
+	return os.Stdout.Write(j)
+}
+
+// Lincoln is a logger for templeton.
+type Lincoln struct {
+	*log.Logger
+}
+
+var (
+	logger *Lincoln
+)
+
+// New returns a pointer to a newly initialized Lincoln.
+func New() *Lincoln {
+	if logger == nil {
+		logger = &Lincoln{log.New(LoggerFunc(LogWriter), "", log.Lshortfile)}
+	}
+	return logger
+}
+
+func (l *Lincoln) Write(buf []byte) (n int, err error) {
+	m := NewLogMessage(string(buf[:]))
+	j, err := json.Marshal(m)
+	if err != nil {
+		return 0, err
+	}
+	j = append(j, []byte("\n")...)
+	return os.Stdout.Write(j)
+}
diff --git a/services/templeton/src/logcabin/logcabin_test.go b/services/templeton/src/logcabin/logcabin_test.go
new file mode 100644
index 000000000..8c95c908c
--- /dev/null
+++ b/services/templeton/src/logcabin/logcabin_test.go
@@ -0,0 +1,69 @@
+package logcabin
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+func TestNewLogMessage(t *testing.T) {
+	m := NewLogMessage("foo")
+	expected := "templeton"
+	if m.Service != expected {
+		t.Errorf("LogMessage.Service was %s instead of %s", m.Service, expected)
+	}
+	if m.Artifact != expected {
+		t.Errorf("LogMessage.Artifact was %s instead of %s", m.Artifact, expected)
+	}
+	expected = "org.iplantc"
+	if m.Group != expected {
+		t.Errorf("LogMessage.Group was %s instead of %s", m.Group, expected)
+	}
+	expected = "INFO"
+	if m.Level != expected {
+		t.Errorf("LogMessage.Level was %s instead of %s", m.Level, expected)
+	}
+	expected = "foo"
+	if m.Message != expected {
+		t.Errorf("LogMessage.Message was %s instead of %s", m.Message, expected)
+	}
+}
+
+func TestLogWriter(t *testing.T) {
+	original := os.Stdout
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Error(err)
+		t.Fail()
+	}
+	os.Stdout = w
+	restore := func() {
+		os.Stdout = original
+	}
+	defer restore()
+	expected := "this is a test"
+	_, err = LogWriter([]byte(expected))
+	if err != nil {
+		t.Error(err)
+		os.Stdout = original
+		t.Fail()
+	}
+	w.Close()
+	var msg LogMessage
+	actualBytes, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Error(err)
+		t.Fail()
+	}
+	err = json.Unmarshal(actualBytes, &msg)
+	if err != nil {
+		t.Error(err)
+		t.Fail()
+	}
+	actual := msg.Message
+	if actual != expected {
+		t.Errorf("LogWriter returned %s instead of %s", actual, expected)
+	}
+
+}
diff --git a/services/templeton/src/templeton/database/database.go b/services/templeton/src/templeton/database/database.go
new file mode 100644
index 000000000..9c34c6834
--- /dev/null
+++ b/services/templeton/src/templeton/database/database.go
@@ -0,0 +1,169 @@
+package database
+
+import (
+	"database/sql"
+	"fmt"
+
+	"templeton/model"
+
+	_ "github.com/lib/pq"
+)
+
+// Databaser is a type used to interact with the database.
+type Databaser struct {
+	db         *sql.DB
+	ConnString string
+}
+
+// NewDatabaser returns a pointer to a Databaser instance that has already
+// connected to the database by calling Ping().
+func NewDatabaser(connString string) (*Databaser, error) {
+	db, err := sql.Open("postgres", connString)
+	if err != nil {
+		return nil, err
+	}
+	err = db.Ping()
+	if err != nil {
+		return nil, err
+	}
+	databaser := &Databaser{
+		db:         db,
+		ConnString: connString,
+	}
+	return databaser, nil
+}
+
+// avuRecordFromRow converts a sql.Rows from a result set to an AVU record
+// It would be great if they'd provided an interface for *this* Scan method
+// (sql.Scanner is for the other one) but we'll just have to live with being
+// unable to use QueryRow for this
+func avuRecordFromRow(row *sql.Rows) (*model.AVURecord, error) {
+	ar := &model.AVURecord{}
+
+	err := row.Scan(
+		&ar.ID,
+		&ar.Attribute,
+		&ar.Value,
+		&ar.Unit,
+		&ar.TargetId,
+		&ar.TargetType,
+		&ar.CreatedBy,
+		&ar.ModifiedBy,
+		&ar.CreatedOn,
+		&ar.ModifiedOn,
+	)
+
+	return ar, err
+}
+
+const _selectAVU = `
+	SELECT cast(id as varchar),
+	       attribute,
+	       value,
+	       unit,
+	       cast(target_id as varchar),
+	       cast(target_type as varchar),
+	       created_by,
+	       modified_by,
+	       created_on,
+	       modified_on
+	  FROM avus
+`
+
+// selectAVUsWhere generates a SELECT FROM avus with a given WHERE clause (or no WHERE, given an empty string)
+func selectAVUsWhere(where string) string {
+	if where != "" {
+		return fmt.Sprintf("%s WHERE %s ORDER BY target_id", _selectAVU, where)
+	}
+	return fmt.Sprintf("%s ORDER BY target_id", _selectAVU)
+}
+
+// GetAVU returns a model.AVURecord from the database
+func (d *Databaser) GetAVU(uuid string) (*model.AVURecord, error) {
+	query := selectAVUsWhere("id = cast($1 as uuid)")
+	rows, err := d.db.Query(query, uuid)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	if !rows.Next() {
+		err := rows.Err()
+		if err == nil {
+			err = sql.ErrNoRows
+		}
+		return nil, err
+	}
+	ar, err := avuRecordFromRow(rows)
+	if err != nil {
+		return nil, err
+	}
+	if rows.Next() {
+		return ar, fmt.Errorf("AVU Query for %s returned more than one row", uuid)
+	}
+	return ar, nil
+}
+
+// GetObjectAVUs returns a slice of model.AVURecord structs by UUID
+func (d *Databaser) GetObjectAVUs(uuid string) ([]model.AVURecord, error) {
+	query := selectAVUsWhere("target_id = cast($1 as uuid)")
+
+	rows, err := d.db.Query(query, uuid)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var retval []model.AVURecord
+	for rows.Next() {
+		ar, err := avuRecordFromRow(rows)
+		if err != nil {
+			return nil, err
+		}
+		retval = append(retval, *ar)
+	}
+	err = rows.Err()
+	return retval, err
+}
+
+// GetAllObjects returns a function to iterate through individual objects' worth of AVURecords, and a function to clean up
+// The function it returns will return nil if all records have been read.
+func (d *Databaser) GetAllObjects() (func() ([]model.AVURecord, error), func(), error) { + query := selectAVUsWhere("") + + rows, err := d.db.Query(query) + endFunc := func() { rows.Close() } + if err != nil { + return nil, endFunc, err + } + + lastRow := &model.AVURecord{TargetId: ""} + moreRows := true + + return func() ([]model.AVURecord, error) { + if !moreRows { + return nil, nil + } + var retval []model.AVURecord + if lastRow.TargetId != "" { + retval = append(retval, *lastRow) + } + for moreRows { + moreRows = rows.Next() + if !moreRows { + break + } + ar, err := avuRecordFromRow(rows) + if err != nil { + return nil, err + } + if lastRow.TargetId == "" || lastRow.TargetId == ar.TargetId { + lastRow = ar + retval = append(retval, *ar) + } else { + lastRow = ar + break + } + } + err = rows.Err() + return retval, err + }, endFunc, err +} diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go new file mode 100644 index 000000000..757144633 --- /dev/null +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -0,0 +1,86 @@ +package elasticsearch + +import ( + "logcabin" + + "encoding/json" + "github.com/mattbaird/elastigo/lib" + + "templeton/database" + "templeton/model" +) + +var ( + logger = logcabin.New() +) + +// Elasticer is a type used to interact with Elasticsearch +type Elasticer struct { + es *elastigo.Conn + baseURL string + index string +} + +// NewElasticer returns a pointer to an Elasticer instance that has already tested its connection +// by making a WaitForStatus call to the configured Elasticsearch cluster +func NewElasticer(elasticsearchBase string, elasticsearchIndex string) (*Elasticer, error) { + c := elastigo.NewConn() + + err := c.SetFromUrl(elasticsearchBase) + if err != nil { + return nil, err + } + + _, err = c.WaitForStatus("red", 10, elasticsearchIndex) + if err != nil { + return nil, err + } + + return &Elasticer{es: c, baseURL: elasticsearchBase, index: elasticsearchIndex}, nil +} + +func (e *Elasticer) Close() { + e.es.Close() +} + +// IndexEverything creates a bulk indexer and takes a database, and iterates to index its contents +func (e *Elasticer) IndexEverything(d *database.Databaser) error { + indexer := e.es.NewBulkIndexerErrors(10, 60) + indexer.Start() + defer indexer.Stop() + + nextObjFunc, endFunc, err := d.GetAllObjects() + defer endFunc() + + if err != nil { + logger.Fatal(err) + } + + for { + ids, err := nextObjFunc() + if err != nil { + logger.Print(err) + break + } + if ids == nil { + logger.Print("Done all rows, finishing.") + break + } + + formatted, err := model.AVUsToIndexedObject(ids) + if err != nil { + logger.Print(err) + break + } + logger.Printf("Indexing %s", formatted.ID) + + js, err := json.Marshal(formatted) + if err != nil { + logger.Print(err) + break + } + + indexer.Index(e.index, "metadata", formatted.ID, "", "", nil, js) + } + return nil +} diff --git a/services/templeton/src/templeton/main.go b/services/templeton/src/templeton/main.go new file mode 100644 index 000000000..590167ece --- /dev/null +++ b/services/templeton/src/templeton/main.go @@ -0,0 +1,135 @@ +package main + +import ( + "flag" + "logcabin" + + "configurate" + "fmt" + "os" + + "templeton/database" + "templeton/elasticsearch" +) + +var ( + logger = logcabin.New() + version = flag.Bool("version", false, "Print version information") + mode = flag.String("mode", "", "One of 'periodic', 'incremental', or 'full'. 
Required except for --version.") + cfgPath = flag.String("config", "", "Path to the configuration file. Required except for --version.") + amqpURI string + elasticsearchBase string + elasticsearchIndex string + dbURI string + gitref string + appver string + builtby string +) + +func init() { + flag.Parse() +} + +// AppVersion prints version information to stdout +func AppVersion() { + if appver != "" { + fmt.Printf("App-Version: %s\n", appver) + } + if gitref != "" { + fmt.Printf("Git-Ref: %s\n", gitref) + } + + if builtby != "" { + fmt.Printf("Built-By: %s\n", builtby) + } +} + +func checkMode() { + validModes := []string{"periodic", "incremental", "full"} + foundMode := false + + for _, v := range validModes { + if v == *mode { + foundMode = true + } + } + + if !foundMode { + fmt.Printf("Invalid mode: %s\n", *mode) + flag.PrintDefaults() + os.Exit(-1) + } +} + +func initConfig(cfgPath string) { + err := configurate.Init(cfgPath) + if err != nil { + logger.Fatal(err) + } +} + +func loadElasticsearchConfig() { + var err error + elasticsearchBase, err = configurate.C.String("elasticsearch.base") + if err != nil { + logger.Fatal(err) + } + elasticsearchIndex, err = configurate.C.String("elasticsearch.index") + if err != nil { + logger.Fatal(err) + } +} + +func loadAMQPConfig() { + var err error + amqpURI, err = configurate.C.String("amqp.uri") + if err != nil { + logger.Fatal(err) + } +} + +func loadDBConfig() { + var err error + dbURI, err = configurate.C.String("db.uri") + if err != nil { + logger.Fatal(err) + } +} + +func main() { + if *version { + AppVersion() + os.Exit(0) + } + + checkMode() + + if *cfgPath == "" { + fmt.Println("--config is required") + flag.PrintDefaults() + os.Exit(-1) + } + + initConfig(*cfgPath) + loadElasticsearchConfig() + es, err := elasticsearch.NewElasticer(elasticsearchBase, elasticsearchIndex) + if err != nil { + logger.Fatal(err) + } + defer es.Close() + + loadDBConfig() + d, err := database.NewDatabaser(dbURI) + if err != nil { + logger.Fatal(err) + } + + if *mode == "full" { + logger.Println("Full indexing mode selected.") + + es.IndexEverything(d) + return + } + + loadAMQPConfig() +} diff --git a/services/templeton/src/templeton/model/model.go b/services/templeton/src/templeton/model/model.go new file mode 100644 index 000000000..375fb7de9 --- /dev/null +++ b/services/templeton/src/templeton/model/model.go @@ -0,0 +1,50 @@ +package model + +import "time" + +// AVURecord is a type that contains info from the avus table +type AVURecord struct { + ID string + Attribute string + Value string + Unit string + TargetId string + TargetType string + CreatedBy string + ModifiedBy string + CreatedOn time.Time + ModifiedOn time.Time +} + +// IndexedAVU is a type that contains a single AVU as represented in ES +type IndexedAVU struct { + Attribute string `json:"attribute"` + Value string `json:"value"` + Unit string `json:"unit"` +} + +// IndexedObject is a type that contains info as it is sent to and recieved from ES +type IndexedObject struct { + ID string `json:"id"` + Metadata []IndexedAVU `json:"metadata"` +} + +// avuRecordToIndexedAVU turns a AVURecord into a *IndexedAVU +func avuRecordToIndexedAVU(avu AVURecord) (*IndexedAVU, error) { + ia := &IndexedAVU{Attribute: avu.Attribute, Value: avu.Value, Unit: avu.Unit} + return ia, nil +} + +// AVUsToIndexedObject takes []AVURecord and creates a *IndexedObject +func AVUsToIndexedObject(avus []AVURecord) (*IndexedObject, error) { + var ias []IndexedAVU + for _, avu := range avus { + ia, err := 
avuRecordToIndexedAVU(avu) + if err != nil { + return nil, err + } + ias = append(ias, *ia) + } + retval := &IndexedObject{ID: avus[0].TargetId, Metadata: ias} + return retval, nil +} diff --git a/services/templeton/src/test/test_config.yaml b/services/templeton/src/test/test_config.yaml new file mode 100644 index 000000000..d78711daf --- /dev/null +++ b/services/templeton/src/test/test_config.yaml @@ -0,0 +1,9 @@ +amqp: + uri: amqp://guest:guest@192.168.99.100:5672/ + +elasticsearch: + base: http://localhost:9200 + index: data + +db: + uri: postgres://de:notprod@192.168.99.100:5432/metadata?sslmode=disable diff --git a/services/templeton/version b/services/templeton/version new file mode 100644 index 000000000..da030363c --- /dev/null +++ b/services/templeton/version @@ -0,0 +1 @@ +5.2.5.0 From 3f1bab53e8039b4e78ff5ce47490eb06eadee096 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 15 Feb 2016 15:58:06 -0700 Subject: [PATCH 083/183] Add vendored dependencies for initial version of templeton. --- services/templeton/vendor/manifest | 42 + .../src/github.com/araddon/gou/LICENSE.md | 21 + .../src/github.com/araddon/gou/README.md | 129 + .../src/github.com/araddon/gou/coerce.go | 274 ++ .../src/github.com/araddon/gou/coerce_test.go | 25 + .../github.com/araddon/gou/goutest/assert.go | 33 + .../vendor/src/github.com/araddon/gou/http.go | 205 ++ .../src/github.com/araddon/gou/jsonhelper.go | 694 +++++ .../github.com/araddon/gou/jsonhelper_test.go | 221 ++ .../vendor/src/github.com/araddon/gou/log.go | 412 +++ .../src/github.com/araddon/gou/log_unix.go | 29 + .../src/github.com/araddon/gou/log_windows.go | 8 + .../src/github.com/araddon/gou/testutil.go | 75 + .../src/github.com/araddon/gou/throttle.go | 56 + .../github.com/araddon/gou/throttle_test.go | 30 + .../vendor/src/github.com/araddon/gou/uid.go | 94 + .../src/github.com/araddon/gou/uid_test.go | 11 + .../src/github.com/bitly/go-hostpool/LICENSE | 21 + .../github.com/bitly/go-hostpool/README.md | 17 + .../bitly/go-hostpool/epsilon_greedy.go | 205 ++ .../go-hostpool/epsilon_value_calculators.go | 40 + .../bitly/go-hostpool/example_test.go | 13 + .../bitly/go-hostpool/host_entry.go | 62 + .../github.com/bitly/go-hostpool/hostpool.go | 201 ++ .../bitly/go-hostpool/hostpool_test.go | 145 + .../src/github.com/lib/pq/CONTRIBUTING.md | 29 + .../vendor/src/github.com/lib/pq/LICENSE.md | 8 + .../vendor/src/github.com/lib/pq/README.md | 105 + .../src/github.com/lib/pq/bench_test.go | 435 +++ .../vendor/src/github.com/lib/pq/buf.go | 91 + .../vendor/src/github.com/lib/pq/certs/README | 3 + .../github.com/lib/pq/certs/postgresql.crt | 69 + .../github.com/lib/pq/certs/postgresql.key | 15 + .../src/github.com/lib/pq/certs/root.crt | 24 + .../src/github.com/lib/pq/certs/server.crt | 81 + .../src/github.com/lib/pq/certs/server.key | 27 + .../vendor/src/github.com/lib/pq/conn.go | 1847 +++++++++++ .../vendor/src/github.com/lib/pq/conn_test.go | 1433 +++++++++ .../vendor/src/github.com/lib/pq/copy.go | 267 ++ .../vendor/src/github.com/lib/pq/copy_test.go | 465 +++ .../vendor/src/github.com/lib/pq/doc.go | 212 ++ .../vendor/src/github.com/lib/pq/encode.go | 538 ++++ .../src/github.com/lib/pq/encode_test.go | 720 +++++ .../vendor/src/github.com/lib/pq/error.go | 508 +++ .../src/github.com/lib/pq/hstore/hstore.go | 118 + .../github.com/lib/pq/hstore/hstore_test.go | 148 + .../github.com/lib/pq/listen_example/doc.go | 102 + .../vendor/src/github.com/lib/pq/notify.go | 766 +++++ .../src/github.com/lib/pq/notify_test.go | 574 ++++ 
.../vendor/src/github.com/lib/pq/oid/doc.go | 6 + .../vendor/src/github.com/lib/pq/oid/gen.go | 74 + .../vendor/src/github.com/lib/pq/oid/types.go | 161 + .../vendor/src/github.com/lib/pq/ssl_test.go | 226 ++ .../vendor/src/github.com/lib/pq/url.go | 76 + .../vendor/src/github.com/lib/pq/url_test.go | 66 + .../src/github.com/lib/pq/user_posix.go | 24 + .../src/github.com/lib/pq/user_windows.go | 27 + .../mattbaird/elastigo/lib/baserequest.go | 141 + .../mattbaird/elastigo/lib/baseresponse.go | 146 + .../mattbaird/elastigo/lib/catindexinfo.go | 80 + .../elastigo/lib/catindexinfo_test.go | 117 + .../mattbaird/elastigo/lib/catnodeinfo.go | 249 ++ .../elastigo/lib/catnodeinfo_test.go | 58 + .../mattbaird/elastigo/lib/catresponses.go | 105 + .../mattbaird/elastigo/lib/catshardinfo.go | 106 + .../elastigo/lib/catshardinfo_test.go | 85 + .../mattbaird/elastigo/lib/clusterhealth.go | 128 + .../elastigo/lib/clusterhealthresponses.go | 45 + .../elastigo/lib/clusternodeshotthreads.go | 12 + .../elastigo/lib/clusternodesinfo.go | 184 ++ .../elastigo/lib/clusternodesinfo_test.go | 37 + .../elastigo/lib/clusternodesshutdown.go | 37 + .../elastigo/lib/clusternodesstats.go | 31 + .../mattbaird/elastigo/lib/clusterreroute.go | 81 + .../mattbaird/elastigo/lib/clusterstate.go | 38 + .../elastigo/lib/clusterstatresponses.go | 299 ++ .../elastigo/lib/clusterupdatesettings.go | 46 + .../mattbaird/elastigo/lib/connection.go | 184 ++ .../mattbaird/elastigo/lib/connection_test.go | 62 + .../mattbaird/elastigo/lib/corebulk.go | 414 +++ .../mattbaird/elastigo/lib/corebulk_test.go | 399 +++ .../mattbaird/elastigo/lib/corebulkudp.go | 12 + .../mattbaird/elastigo/lib/corecount.go | 45 + .../mattbaird/elastigo/lib/coredelete.go | 37 + .../elastigo/lib/coredeletebyquery.go | 57 + .../elastigo/lib/coreexample_test.go | 52 + .../mattbaird/elastigo/lib/coreexplain.go | 43 + .../mattbaird/elastigo/lib/coreget.go | 129 + .../mattbaird/elastigo/lib/coreindex.go | 132 + .../mattbaird/elastigo/lib/coremget.go | 62 + .../elastigo/lib/coremorelikethis.go | 57 + .../mattbaird/elastigo/lib/coremsearch.go | 12 + .../mattbaird/elastigo/lib/corepercolate.go | 64 + .../elastigo/lib/corepercolate_test.go | 64 + .../mattbaird/elastigo/lib/coresearch.go | 246 ++ .../mattbaird/elastigo/lib/coresearch_test.go | 83 + .../mattbaird/elastigo/lib/coretest_test.go | 198 ++ .../mattbaird/elastigo/lib/coreupdate.go | 94 + .../mattbaird/elastigo/lib/corevalidate.go | 53 + .../mattbaird/elastigo/lib/error.go | 8 + .../mattbaird/elastigo/lib/indicesaliases.go | 65 + .../mattbaird/elastigo/lib/indicesanalyze.go | 55 + .../elastigo/lib/indicesclearcache.go | 44 + .../elastigo/lib/indicescreateindex.go | 77 + .../elastigo/lib/indicesdeleteindex.go | 42 + .../elastigo/lib/indicesdeletemapping.go | 45 + .../elastigo/lib/indicesdeletemapping_test.go | 54 + .../mattbaird/elastigo/lib/indicesdoc.go | 12 + .../mattbaird/elastigo/lib/indicesflush.go | 46 + .../elastigo/lib/indicesgetsettings.go | 12 + .../elastigo/lib/indicesindicesexists.go | 37 + .../elastigo/lib/indicesopencloseindex.go | 54 + .../mattbaird/elastigo/lib/indicesoptimize.go | 41 + .../elastigo/lib/indicesputmapping.go | 171 ++ .../elastigo/lib/indicesputmapping_test.go | 302 ++ .../elastigo/lib/indicesputsettings.go | 42 + .../mattbaird/elastigo/lib/indicesrefresh.go | 45 + .../mattbaird/elastigo/lib/indicessegments.go | 12 + .../mattbaird/elastigo/lib/indicessnapshot.go | 44 + .../mattbaird/elastigo/lib/indicesstats.go | 12 + .../mattbaird/elastigo/lib/indicesstatus.go | 43 + 
.../elastigo/lib/indicestemplates.go | 12 + .../elastigo/lib/indicesupdatesettings.go | 12 + .../mattbaird/elastigo/lib/request.go | 126 + .../mattbaird/elastigo/lib/request_test.go | 74 + .../mattbaird/elastigo/lib/searchaggregate.go | 226 ++ .../elastigo/lib/searchaggregate_test.go | 177 ++ .../mattbaird/elastigo/lib/searchdsl.go | 28 + .../mattbaird/elastigo/lib/searchfacet.go | 142 + .../elastigo/lib/searchfacet_test.go | 42 + .../mattbaird/elastigo/lib/searchfilter.go | 402 +++ .../elastigo/lib/searchfilter_test.go | 287 ++ .../mattbaird/elastigo/lib/searchhighlight.go | 138 + .../elastigo/lib/searchhighlight_test.go | 67 + .../mattbaird/elastigo/lib/searchquery.go | 262 ++ .../mattbaird/elastigo/lib/searchreadme | 4 + .../mattbaird/elastigo/lib/searchsearch.go | 204 ++ .../elastigo/lib/searchsearch_test.go | 291 ++ .../mattbaird/elastigo/lib/searchsort.go | 52 + .../mattbaird/elastigo/lib/setup_test.go | 84 + .../mattbaird/elastigo/lib/shared.go | 18 + .../mattbaird/elastigo/lib/shared_test.go | 43 + .../mattbaird/elastigo/lib/snapshot.go | 120 + .../src/github.com/olebedev/config/LICENSE | 20 + .../src/github.com/olebedev/config/README.md | 17 + .../src/github.com/olebedev/config/config.go | 583 ++++ .../github.com/olebedev/config/config_test.go | 486 +++ .../src/github.com/olebedev/config/doc.go | 139 + .../github.com/olebedev/config/wercker.yml | 1 + .../vendor/src/gopkg.in/yaml.v2/LICENSE | 188 ++ .../src/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + .../vendor/src/gopkg.in/yaml.v2/README.md | 131 + .../vendor/src/gopkg.in/yaml.v2/apic.go | 742 +++++ .../vendor/src/gopkg.in/yaml.v2/decode.go | 683 +++++ .../src/gopkg.in/yaml.v2/decode_test.go | 988 ++++++ .../vendor/src/gopkg.in/yaml.v2/emitterc.go | 1685 ++++++++++ .../vendor/src/gopkg.in/yaml.v2/encode.go | 306 ++ .../src/gopkg.in/yaml.v2/encode_test.go | 501 +++ .../vendor/src/gopkg.in/yaml.v2/parserc.go | 1096 +++++++ .../vendor/src/gopkg.in/yaml.v2/readerc.go | 394 +++ .../vendor/src/gopkg.in/yaml.v2/resolve.go | 203 ++ .../vendor/src/gopkg.in/yaml.v2/scannerc.go | 2710 +++++++++++++++++ .../vendor/src/gopkg.in/yaml.v2/sorter.go | 104 + .../vendor/src/gopkg.in/yaml.v2/suite_test.go | 12 + .../vendor/src/gopkg.in/yaml.v2/writerc.go | 89 + .../vendor/src/gopkg.in/yaml.v2/yaml.go | 346 +++ .../vendor/src/gopkg.in/yaml.v2/yamlh.go | 716 +++++ .../src/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++ 168 files changed, 33250 insertions(+) create mode 100644 services/templeton/vendor/manifest create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/LICENSE.md create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/README.md create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/coerce.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/coerce_test.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/goutest/assert.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/http.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/jsonhelper.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/jsonhelper_test.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/log.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/log_unix.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/log_windows.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/testutil.go create mode 100644 
services/templeton/vendor/src/github.com/araddon/gou/throttle.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/throttle_test.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/uid.go create mode 100644 services/templeton/vendor/src/github.com/araddon/gou/uid_test.go create mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/LICENSE create mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/README.md create mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_greedy.go create mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_value_calculators.go create mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/example_test.go create mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/host_entry.go create mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool.go create mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/CONTRIBUTING.md create mode 100644 services/templeton/vendor/src/github.com/lib/pq/LICENSE.md create mode 100644 services/templeton/vendor/src/github.com/lib/pq/README.md create mode 100644 services/templeton/vendor/src/github.com/lib/pq/bench_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/buf.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/certs/README create mode 100644 services/templeton/vendor/src/github.com/lib/pq/certs/postgresql.crt create mode 100644 services/templeton/vendor/src/github.com/lib/pq/certs/postgresql.key create mode 100644 services/templeton/vendor/src/github.com/lib/pq/certs/root.crt create mode 100644 services/templeton/vendor/src/github.com/lib/pq/certs/server.crt create mode 100644 services/templeton/vendor/src/github.com/lib/pq/certs/server.key create mode 100644 services/templeton/vendor/src/github.com/lib/pq/conn.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/conn_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/copy.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/copy_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/doc.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/encode.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/encode_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/error.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/hstore/hstore.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/hstore/hstore_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/listen_example/doc.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/notify.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/notify_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/oid/doc.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/oid/gen.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/oid/types.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/ssl_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/url.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/url_test.go create mode 100644 services/templeton/vendor/src/github.com/lib/pq/user_posix.go 
create mode 100644 services/templeton/vendor/src/github.com/lib/pq/user_windows.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baserequest.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baseresponse.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catresponses.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealth.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealthresponses.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodeshotthreads.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesshutdown.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesstats.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterreroute.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstate.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstatresponses.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterupdatesettings.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulkudp.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corecount.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredelete.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredeletebyquery.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexample_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexplain.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreget.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreindex.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremget.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremorelikethis.go create mode 100644 
services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremsearch.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coretest_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreupdate.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corevalidate.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/error.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesaliases.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesanalyze.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesclearcache.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicescreateindex.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeleteindex.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdoc.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesflush.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesgetsettings.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesindicesexists.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesopencloseindex.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesoptimize.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputsettings.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesrefresh.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessegments.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessnapshot.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstats.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstatus.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicestemplates.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesupdatesettings.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate.go 
create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchdsl.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchquery.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchreadme create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsort.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/setup_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared_test.go create mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/snapshot.go create mode 100644 services/templeton/vendor/src/github.com/olebedev/config/LICENSE create mode 100644 services/templeton/vendor/src/github.com/olebedev/config/README.md create mode 100644 services/templeton/vendor/src/github.com/olebedev/config/config.go create mode 100644 services/templeton/vendor/src/github.com/olebedev/config/config_test.go create mode 100644 services/templeton/vendor/src/github.com/olebedev/config/doc.go create mode 100644 services/templeton/vendor/src/github.com/olebedev/config/wercker.yml create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/LICENSE create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/README.md create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/apic.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/decode.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/decode_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/emitterc.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/encode.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/encode_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/parserc.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/readerc.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/resolve.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/scannerc.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/sorter.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/suite_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/writerc.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/yaml.go create mode 100644 
services/templeton/vendor/src/gopkg.in/yaml.v2/yamlh.go create mode 100644 services/templeton/vendor/src/gopkg.in/yaml.v2/yamlprivateh.go diff --git a/services/templeton/vendor/manifest b/services/templeton/vendor/manifest new file mode 100644 index 000000000..2d643f23b --- /dev/null +++ b/services/templeton/vendor/manifest @@ -0,0 +1,42 @@ +{ + "version": 0, + "dependencies": [ + { + "importpath": "github.com/araddon/gou", + "repository": "https://github.com/araddon/gou", + "revision": "1fd0868458fb611a8a956fae0ed0d0cc657cd321", + "branch": "master" + }, + { + "importpath": "github.com/bitly/go-hostpool", + "repository": "https://github.com/bitly/go-hostpool", + "revision": "d0e59c22a56e8dadfed24f74f452cea5a52722d2", + "branch": "master" + }, + { + "importpath": "github.com/lib/pq", + "repository": "https://github.com/lib/pq", + "revision": "69552e54d2a9d4c6a2438926a774930f7bc398ec", + "branch": "master" + }, + { + "importpath": "github.com/mattbaird/elastigo/lib", + "repository": "https://github.com/mattbaird/elastigo", + "revision": "7dc47d261c9718f93000e2ed2b0b94b13d078e77", + "branch": "master", + "path": "/lib" + }, + { + "importpath": "github.com/olebedev/config", + "repository": "https://github.com/olebedev/config", + "revision": "e3edea7d68b76222b5118cc2e1cf3825e30abb80", + "branch": "master" + }, + { + "importpath": "gopkg.in/yaml.v2", + "repository": "https://gopkg.in/yaml.v2", + "revision": "f7716cbe52baa25d2e9b0d0da546fcf909fc16b4", + "branch": "v2" + } + ] +} \ No newline at end of file diff --git a/services/templeton/vendor/src/github.com/araddon/gou/LICENSE.md b/services/templeton/vendor/src/github.com/araddon/gou/LICENSE.md new file mode 100644 index 000000000..628f430fb --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2012-2014 Aaron Raddon and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/services/templeton/vendor/src/github.com/araddon/gou/README.md b/services/templeton/vendor/src/github.com/araddon/gou/README.md new file mode 100644 index 000000000..5c773adbb --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/README.md @@ -0,0 +1,129 @@ +gou - Go Utilities +=========================== + +Go Utilities (logging, json) + +JsonHelper +=============== + +A Go Json Helper, focused on Type coercion, and json path query. + +```go + package main + import . "github.com/araddon/gou" + import . 
"github.com/araddon/gou/goutest" + import "testing" + + + func TestJsonHelper() { + + var jsonData := []byte(`{ + "name":"aaron", + "nullstring":null, + "ints":[1,2,3,4], + "int":1, + "intstr":"1", + "int64":1234567890, + "MaxSize" : 1048576, + "strings":["string1"], + "stringscsv":"string1,string2", + "nested":{ + "nest":"string2", + "strings":["string1"], + "int":2, + "list":["value"], + "nest2":{ + "test":"good" + } + }, + "nested2":[ + {"sub":2} + ], + "period.name":"value" + }` + + jh := NewJsonHelper(jsonData) + + // String method + Assert(jh.String("name") == "aaron", t, "should get 'aaron' %s", jh.String("name")) + // Int Method + Assert(jh.Int("int") == 1, t, "get int ") + // Selecting items from an array + Assert(jh.Int("ints[0]") == 1, t, "get int from array %d", jh.Int("ints[0]")) + Assert(jh.Int("ints[2]") == 3, t, "get int from array %d", jh.Int("ints[0]")) + // Getting arrays + Assert(len(jh.Ints("ints")) == 4, t, "get int array %v", jh.Ints("ints")) + // Type coercion to Int64 + Assert(jh.Int64("int64") == 1234567890, t, "get int") + Assert(jh.Int("nested.int") == 2, t, "get int") + + // Path based selection + Assert(jh.String("nested.nest") == "string2", t, "should get string %s", jh.String("nested.nest")) + Assert(jh.String("nested.nest2.test") == "good", t, "should get string %s", jh.String("nested.nest2.test")) + Assert(jh.String("nested.list[0]") == "value", t, "get string from array") + Assert(jh.Int("nested2[0].sub") == 2, t, "get int from obj in array %d", jh.Int("nested2[0].sub")) + + // casing? + Assert(jh.Int("MaxSize") == 1048576, t, "get int, test capitalization? ") + sl := jh.Strings("strings") + Assert(len(sl) == 1 && sl[0] == "string1", t, "get strings ") + sl = jh.Strings("stringscsv") + Assert(len(sl) == 2 && sl[0] == "string1", t, "get strings ") + + // Safe gets + i64, ok := jh.Int64Safe("int64") + Assert(ok, t, "int64safe ok") + Assert(i64 == 1234567890, t, "int64safe value") + + i, ok := jh.IntSafe("int") + Assert(ok, t, "intsafe ok") + Assert(i == 1, t, "intsafe value") + + l := jh.List("nested2") + Assert(len(l) == 1, t, "get list") + + jhm := jh.Helpers("nested2") + Assert(len(jhm) == 1, t, "get list of helpers") + Assert(jhm[0].Int("sub") == 2, t, "Should get list of helpers") + + // Now lets test xpath type syntax + Assert(jh.Int("/MaxSize") == 1048576, t, "get int, test capitalization? ") + Assert(jh.String("/nested/nest") == "string2", t, "should get string %s", jh.String("/nested/nest")) + Assert(jh.String("/nested/list[0]") == "value", t, "get string from array") + // note this one has period in name + Assert(jh.String("/period.name") == "value", t, "test period in name ") + } + +``` + + +Logging +=============== + +Yet Another Go Logger, configureable logging. 
+ +```go + package main + import "github.com/araddon/gou" + import "flag" + + var logLevel *string = flag.String("logging", "debug", "Which log level: [debug,info,warn,error,fatal]") + + func main() { + + flag.Parse() + gou.SetupLogging(*logLevel) + + // logging methods + gou.Debug("hello", thing, " more ", stuff) + + gou.Error("hello") + + gou.Errorf("hello %v", thing) + } + +``` + +License +=============== +MIT License diff --git a/services/templeton/vendor/src/github.com/araddon/gou/coerce.go b/services/templeton/vendor/src/github.com/araddon/gou/coerce.go new file mode 100644 index 000000000..8b461b456 --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/coerce.go @@ -0,0 +1,274 @@ +package gou + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +// Coerce types (string,int,int64, float, []byte) into String type +func CoerceString(v interface{}) (string, error) { + switch val := v.(type) { + case string: + if val == "null" || val == "NULL" { + return "", nil + } + return val, nil + case int: + return strconv.Itoa(val), nil + case int32: + return strconv.FormatInt(int64(val), 10), nil + case int64: + return strconv.FormatInt(val, 10), nil + case uint32: + return strconv.FormatUint(uint64(val), 10), nil + case uint64: + return strconv.FormatUint(val, 10), nil + case float32: + return strconv.FormatFloat(float64(val), 'f', -1, 32), nil + case float64: + return strconv.FormatFloat(val, 'f', -1, 64), nil + case []byte: + if string(val) == "null" || string(val) == "NULL" { + return "", nil + } + return string(val), nil + case json.RawMessage: + if string(val) == "null" || string(val) == "NULL" { + return "", nil + } + return string(val), nil + } + return "", fmt.Errorf("Could not coerce to string: %v", v) +} + +// Coerce type to string, returning zero length string if error or nil +func CoerceStringShort(v interface{}) string { + val, _ := CoerceString(v) + return val +} + +func CoerceFloat(v interface{}) (float64, error) { + switch val := v.(type) { + case int: + return float64(val), nil + case int32: + return float64(val), nil + case int64: + return float64(val), nil + case uint32: + return float64(val), nil + case uint64: + return float64(val), nil + case float64: + return val, nil + case string: + if len(val) > 0 { + if iv, err := strconv.ParseFloat(val, 64); err == nil { + return iv, nil + } + } + case []byte: + if len(val) > 0 { + if iv, err := strconv.ParseFloat(string(val), 64); err == nil { + return iv, nil + } + } + case json.RawMessage: + if len(val) > 0 { + if iv, err := strconv.ParseFloat(string(val), 64); err == nil { + return iv, nil + } + } + case nil: + return math.NaN(), nil + } + return 0, fmt.Errorf("Could not Coerce Value: %v", v) +} +func CoerceFloatShort(v interface{}) float64 { + val, _ := CoerceFloat(v) + return val +} + +func CoerceInt64(v interface{}) (int64, error) { + val, ok := valToInt64(v) + if ok { + return val, nil + } + return 0, fmt.Errorf("Could not coerce to int64: %v", v) +} +func CoerceInt64Short(v interface{}) int64 { + val, ok := valToInt64(v) + if ok { + return val + } + return 0 +} + +func CoerceInt(v interface{}) (int, error) { + val, ok := valToInt(v) + if ok { + return val, nil + } + return 0, fmt.Errorf("Could not coerce to int64: %v", v) +} +func CoerceIntShort(v interface{}) int { + val, ok := valToInt(v) + if ok { + return val + } + return 0 +} + +// Coerce a val(interface{}) into a Uint64 +func CoerceUint(v interface{}) (uint64, error) { + u64, ok := valToUint64(v) + if !ok { + return 0, fmt.Errorf("Could not 
Coerce %v", v) + } + return u64, nil +} + +// Coerce a Val(interface{}) into Uint64 +func CoerceUintShort(v interface{}) uint64 { + val, _ := CoerceUint(v) + return val +} + +// Given any numeric type (float*, int*, uint*, string) return an int. Returns false if it would +// overflow or if the the argument is not numeric. +func valToInt(i interface{}) (int, bool) { + i64, ok := valToInt64(i) + if !ok { + return -1, false + } + if i64 > MaxInt || i64 < MinInt { + return -1, false + } + return int(i64), true +} + +// Given any simple type (float*, int*, uint*, string, []byte, json.RawMessage) return an int64. +// Returns false if it would overflow or if the the argument is not numeric. +func valToInt64(i interface{}) (int64, bool) { + switch x := i.(type) { + case float32: + return int64(x), true + case float64: + return int64(x), true + case uint8: + return int64(x), true + case uint16: + return int64(x), true + case uint32: + return int64(x), true + case uint64: + if x > math.MaxInt64 { + return 0, false + } + return int64(x), true + case int8: + return int64(x), true + case int16: + return int64(x), true + case int32: + return int64(x), true + case int64: + return int64(x), true + case int: + return int64(x), true + case uint: + if uint64(x) > math.MaxInt64 { + return 0, false + } + return int64(x), true + case string: + if len(x) > 0 { + if iv, err := strconv.ParseInt(x, 10, 64); err == nil { + return iv, true + } + if iv, err := strconv.ParseFloat(x, 64); err == nil { + return valToInt64(iv) + } + } + case []byte: + if len(x) > 0 { + if iv, err := strconv.ParseInt(string(x), 10, 64); err == nil { + return iv, true + } + if iv, err := strconv.ParseFloat(string(x), 64); err == nil { + return valToInt64(iv) + } + } + case json.RawMessage: + if len(x) > 0 { + if iv, err := strconv.ParseInt(string(x), 10, 64); err == nil { + return iv, true + } + if iv, err := strconv.ParseFloat(string(x), 64); err == nil { + return valToInt64(iv) + } + } + } + return 0, false +} + +// Given any simple type (float*, int*, uint*, string, []byte, json.RawMessage) return an int64. +// Returns false if it would overflow or if the the argument is not numeric. 
+func valToUint64(i interface{}) (uint64, bool) { + switch x := i.(type) { + case float32: + return uint64(x), true + case float64: + return uint64(x), true + case uint8: + return uint64(x), true + case uint16: + return uint64(x), true + case uint32: + return uint64(x), true + case uint64: + return x, true + case int8: + return uint64(x), true + case int16: + return uint64(x), true + case int32: + return uint64(x), true + case int64: + return uint64(x), true + case int: + return uint64(x), true + case uint: + return uint64(x), true + case string: + if len(x) > 0 { + if uiv, err := strconv.ParseUint(x, 10, 64); err == nil { + return uiv, true + } + if fv, err := strconv.ParseFloat(x, 64); err == nil { + return uint64(fv), true + } + } + case []byte: + if len(x) > 0 { + if uiv, err := strconv.ParseUint(string(x), 10, 64); err == nil { + return uiv, true + } + if fv, err := strconv.ParseFloat(string(x), 64); err == nil { + return uint64(fv), true + } + } + case json.RawMessage: + if len(x) > 0 { + if uiv, err := strconv.ParseUint(string(x), 10, 64); err == nil { + return uiv, true + } + if fv, err := strconv.ParseFloat(string(x), 64); err == nil { + return uint64(fv), true + } + } + } + return 0, false +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/coerce_test.go b/services/templeton/vendor/src/github.com/araddon/gou/coerce_test.go new file mode 100644 index 000000000..58922fbbc --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/coerce_test.go @@ -0,0 +1,25 @@ +package gou + +import ( + . "github.com/araddon/gou/goutest" + "testing" +) + +func TestCoerce(t *testing.T) { + + data := map[string]interface{}{ + "int": 4, + "float": 45.3, + "string": "22", + "stringf": "22.2", + } + Assert(CoerceStringShort(data["int"]) == "4", t, "get int as string") + Assert(CoerceStringShort(data["float"]) == "45.3", t, "get float as string: %v", data["float"]) + Assert(CoerceStringShort(data["string"]) == "22", t, "get string as string: %v", data["string"]) + Assert(CoerceStringShort(data["stringf"]) == "22.2", t, "get stringf as string: %v", data["stringf"]) + + Assert(CoerceIntShort(data["int"]) == 4, t, "get int as int: %v", data["int"]) + Assert(CoerceIntShort(data["float"]) == 45, t, "get float as int: %v", data["float"]) + Assert(CoerceIntShort(data["string"]) == 22, t, "get string as int: %v", data["string"]) + Assert(CoerceIntShort(data["stringf"]) == 22, t, "get stringf as int: %v", data["stringf"]) +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/goutest/assert.go b/services/templeton/vendor/src/github.com/araddon/gou/goutest/assert.go new file mode 100644 index 000000000..d74759555 --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/goutest/assert.go @@ -0,0 +1,33 @@ +package goutest + +import ( + "fmt" + "testing" +) + +// dumb simple assert for testing, printing +// Assert(len(items) == 9, t, "Should be 9 but was %d", len(items)) +func Assert(is bool, t *testing.T, args ...interface{}) { + if is == false { + msg := "" + if len(args) > 1 { + switch val := args[0].(type) { + case string: + msg = fmt.Sprintf(val, args[1:len(args)-1]) + default: + msg = fmt.Sprint(args) + } + + } else if len(args) == 1 { + switch val := args[0].(type) { + case string: + msg = val + default: + msg = fmt.Sprint(val) + } + } + + //gou.DoLog(3, gou.ERROR, msg) + t.Fatal(msg) + } +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/http.go b/services/templeton/vendor/src/github.com/araddon/gou/http.go new file mode 100644 
index 000000000..d1dd85a95 --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/http.go @@ -0,0 +1,205 @@ +package gou + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" +) + +// Simple Fetch Wrapper, given a url it returns bytes +func Fetch(url string) (ret []byte, err error) { + resp, err := http.Get(url) + defer func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }() + if err != nil { + Log(WARN, err.Error()) + return + } + ret, err = ioutil.ReadAll(resp.Body) + if err != nil { + return + } + return +} + +// Simple Fetch Wrapper, given a url it returns bytes and response +func FetchResp(url string) (ret []byte, err error, resp *http.Response) { + resp, err = http.Get(url) + defer func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }() + if err != nil { + Log(WARN, err.Error()) + } + if resp == nil || resp.Body == nil { + return + } + ret, err = ioutil.ReadAll(resp.Body) + return +} + +// Simple Fetch Wrapper, given a url it returns Helper, error +// Sends as type application/json, interprets whatever datatype is sent in appropriately +func JsonHelperHttp(method, urlStr string, data interface{}) (JsonHelper, error) { + var body io.Reader + if data != nil { + switch val := data.(type) { + case string: + body = bytes.NewReader([]byte(val)) + case io.Reader: + body = val + case url.Values: + body = bytes.NewReader([]byte(val.Encode())) + default: + by, err := json.Marshal(data) + if err != nil { + return nil, err + } + body = bytes.NewReader(by) + } + + } + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add("Accept", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + jh, err := NewJsonHelperReader(resp.Body) + return jh, err +} + +// posts an application/json to url with body +// ie: type = application/json +func PostJson(postUrl string, data interface{}) (ret string, err error, resp *http.Response) { + var buf io.Reader + if data != nil { + switch val := data.(type) { + case string: + buf = bytes.NewBufferString(val) + case []byte: + buf = bytes.NewReader(val) + case json.RawMessage: + buf = bytes.NewReader([]byte(val)) + case io.Reader: + buf = val + case url.Values: + buf = bytes.NewBufferString(val.Encode()) + default: + by, err := json.Marshal(data) + if err != nil { + return "", err, nil + } + buf = bytes.NewReader(by) + } + } + + resp, err = http.Post(postUrl, "application/json", buf) + defer func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }() + if err != nil { + Log(WARN, err.Error()) + return "", err, resp + } + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err, resp + } + + return string(bodyBytes), nil, resp +} + +// issues http delete an application/json to url with body +func DeleteJson(url, body string) (ret string, err error, resp *http.Response) { + //Post(url string, bodyType string, body io.Reader) + buf := bytes.NewBufferString(body) + Debug(buf.Len()) + req, err := http.NewRequest("DELETE", url, buf) + if err != nil { + Debug(err) + return + } + + req.Header.Add("Content-Type", "application/json") + resp, err = http.DefaultClient.Do(req) //(url, "application/json", buf) + defer func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }() + if err != nil { + Log(WARN, err.Error()) + return "", err, resp + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + 
return "", err, resp + } + + return string(data), nil, resp +} + +// posts a www-form encoded form to url with body +func PostForm(url, body string) (ret string, err error, resp *http.Response) { + //Post(url string, bodyType string, body io.Reader) + buf := bytes.NewBufferString(body) + resp, err = http.Post(url, "application/x-www-form-urlencoded", buf) + defer func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }() + if err != nil { + Log(WARN, url, " ", body, " ", err.Error()) + return "", err, resp + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err, resp + } + + return string(data), nil, resp +} + +// issues http put an application/json to url with optional body +func PutJson(url, body string) (ret string, err error, resp *http.Response) { + buf := bytes.NewBufferString(body) + req, err := http.NewRequest("PUT", url, buf) + if err != nil { + Debug(err) + return + } + req.Header.Add("Content-Type", "application/json") + resp, err = http.DefaultClient.Do(req) + defer func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }() + if err != nil { + Log(WARN, err.Error()) + return "", err, resp + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err, resp + } + + return string(data), nil, resp +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper.go b/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper.go new file mode 100644 index 000000000..dc3ee693b --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper.go @@ -0,0 +1,694 @@ +package gou + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "net/url" + "strconv" + "strings" + "unicode/utf8" +) + +// Convert a slice of bytes into an array by ensuring it is wrapped +// with [] +func MakeJsonList(b []byte) []byte { + if !bytes.HasPrefix(b, []byte{'['}) { + b = append([]byte{'['}, b...) 
+ b = append(b, ']') + } + return b +} + +func JsonString(v interface{}) string { + b, err := json.Marshal(v) + if err != nil { + return `""` + } + return string(b) +} + +func firstNonWsRune(by []byte) (r rune, ok bool) { + for { + if len(by) == 0 { + return 0, false + } + r, numBytes := utf8.DecodeRune(by) + switch r { + case '\t', '\n', '\r', ' ': + by = by[numBytes:] // advance past the current whitespace rune and continue + continue + case utf8.RuneError: // This is returned when invalid UTF8 is found + return 0, false + } + return r, true + } + return 0, false +} + +// Determines if the bytes is a json array, only looks at prefix +// not parsing the entire thing +func IsJson(by []byte) bool { + firstRune, ok := firstNonWsRune(by) + if !ok { + return false + } + if firstRune == '[' || firstRune == '{' { + return true + } + return false +} + +// Determines if the bytes is a json array, only looks at prefix +// not parsing the entire thing +func IsJsonArray(by []byte) bool { + firstRune, ok := firstNonWsRune(by) + if !ok { + return false + } + if firstRune == '[' { + return true + } + return false +} + +func IsJsonObject(by []byte) bool { + firstRune, ok := firstNonWsRune(by) + if !ok { + return false + } + if firstRune == '{' { + return true + } + return false +} + +type JsonRawWriter struct { + bytes.Buffer +} + +func (m *JsonRawWriter) MarshalJSON() ([]byte, error) { + return m.Bytes(), nil +} + +func (m *JsonRawWriter) Raw() json.RawMessage { + return json.RawMessage(m.Bytes()) +} + +// A simple wrapper to help json data be consumed when not +// using Strongly typed structs. +type JsonInterface struct { + data interface{} +} + +// Encode returns its marshaled data as `[]byte` +func (j *JsonInterface) Encode() ([]byte, error) { + return j.MarshalJSON() +} + +// Implements the json.Marshaler interface. +func (j *JsonInterface) MarshalJSON() ([]byte, error) { + return json.Marshal(&j.data) +} + +// Implements the json.Unmarshal interface. 
+func (j *JsonInterface) UnmarshalJSON(raw []byte) error { + return json.Unmarshal(raw, &j.data) +} + +// Coerce to a String +func (j *JsonInterface) String() (string, error) { + return CoerceString(j.data) +} + +// Coerce to a string, may be zero length if missing, or zero length +func (j JsonInterface) StringSh() string { + val, _ := CoerceString(j.data) + return val +} + +// Coerce to Int +func (j *JsonInterface) Int() (int, error) { + return CoerceInt(j.data) +} + +// Coerce to Int, 0 returned if missing or zero +func (j JsonInterface) IntSh() int { + val, _ := CoerceInt(j.data) + return val +} + +// Coerce to Float, return err if needed +func (j *JsonInterface) Float() (float32, error) { + val, err := CoerceFloat(j.data) + return float32(val), err +} + +// Coerce to Float, 0 returned if 0 or missing +func (j JsonInterface) FloatSh() float32 { + val, _ := CoerceFloat(j.data) + return float32(val) +} + +// A wrapper around a map[string]interface{} to facilitate coercion +// of json data to what you want +// +// allows usage such as this +// +// jh := NewJsonHelper([]byte(`{ +// "name":"string", +// "ints":[1,5,9,11], +// "int":1, +// "int64":1234567890, +// "MaxSize" : 1048576, +// "strings":["string1"], +// "nested":{ +// "nest":"string2", +// "strings":["string1"], +// "int":2, +// "list":["value"], +// "nest2":{ +// "test":"good" +// } +// }, +// "nested2":[ +// {"sub":5} +// ] +// }`) +// +// i := jh.Int("nested.int") // 2 +// i2 := jh.Int("ints[1]") // 5 array position 1 from [1,5,9,11] +// s := jh.String("nested.nest") // "string2" +// +type JsonHelper map[string]interface{} + +func NewJsonHelper(b []byte) JsonHelper { + jh := make(JsonHelper) + json.Unmarshal(b, &jh) + return jh +} + +func NewJsonHelperReader(r io.Reader) (jh JsonHelper, err error) { + jh = make(JsonHelper) + err = json.NewDecoder(r).Decode(&jh) + return +} + +func NewJsonHelpers(b []byte) []JsonHelper { + var jhl []JsonHelper + json.Unmarshal(MakeJsonList(b), &jhl) + return jhl +} + +// Make a JsonHelper from http response. This will automatically +// close the response body +func NewJsonHelperFromResp(resp *http.Response) (JsonHelper, error) { + jh := make(JsonHelper) + if resp == nil || resp.Body == nil { + return jh, fmt.Errorf("No response or response body to read") + } + defer resp.Body.Close() + respBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if len(respBytes) == 0 { + return jh, fmt.Errorf("No data in response") + } + if err := json.Unmarshal(respBytes, &jh); err != nil { + return jh, err + } + return jh, nil +} + +func jsonList(v interface{}) []interface{} { + switch v.(type) { + case []interface{}: + return v.([]interface{}) + } + return nil +} + +func jsonEntry(name string, v interface{}) (interface{}, bool) { + switch val := v.(type) { + case map[string]interface{}: + if root, ok := val[name]; ok { + return root, true + } else { + return nil, false + } + case JsonHelper: + return val.Get(name), true + case []interface{}: + return v, true + default: + Debug("no type? 
", name, " ", v) + return nil, false + } +} + +// Get the key (or keypath) value as interface, mostly used +// internally through String, etc methods +// +// jh.Get("name.subname") +// jh.Get("name/subname") +// jh.Get("name.arrayname[1]") +// jh.Get("name.arrayname[]") +func (j JsonHelper) Get(n string) interface{} { + var parts []string + if strings.Contains(n, "/") { + parts = strings.Split(n, "/") + if strings.HasPrefix(n, "/") && len(parts) > 0 { + parts = parts[1:] + } + } else { + parts = strings.Split(n, ".") + } + var root interface{} + var err error + var ok, isList, listEntry bool + var ln, st, idx int + for ict, name := range parts { + isList = strings.HasSuffix(name, "[]") + listEntry = strings.HasSuffix(name, "]") && !isList + ln, idx = len(name), -1 + if isList || listEntry { + st = strings.Index(name, "[") + idx, err = strconv.Atoi(name[st+1 : ln-1]) + name = name[:st] + } + if ict == 0 { + root, ok = j[name] + } else { + root, ok = jsonEntry(name, root) + } + //Debug(isList, listEntry, " ", name, " ", root, " ", ok, err) + if !ok { + if len(parts) > 0 { + // lets ensure the actual json-value doesn't have period in key + root, ok = j[n] + if !ok { + return nil + } else { + //Warnf("returning root %T %#v", root, root) + return root + } + } else { + return nil + } + + } + if isList { + return jsonList(root) + } else if listEntry && err == nil { + if lst := jsonList(root); lst != nil && len(lst) > idx { + root = lst[idx] + } else { + return nil + } + } + + } + return root +} + +// Get a Helper from a string path +func (j JsonHelper) Helper(n string) JsonHelper { + v := j.Get(n) + if v == nil { + return nil + } + switch vt := v.(type) { + case map[string]interface{}: + cn := JsonHelper{} + for n, val := range vt { + cn[n] = val + } + return cn + case map[string]string: + cn := JsonHelper{} + for n, val := range vt { + cn[n] = val + } + return cn + case JsonHelper: + return vt + default: + //Infof("wrong type: %T", v) + } + return nil +} + +// Get list of Helpers at given name. Trys to coerce into +// proper Helper type +func (j JsonHelper) Helpers(n string) []JsonHelper { + v := j.Get(n) + if v == nil { + return nil + } + switch val := v.(type) { + case []map[string]interface{}: + hl := make([]JsonHelper, 0) + for _, mapVal := range val { + hl = append(hl, mapVal) + } + return hl + case []interface{}: + jhl := make([]JsonHelper, 0) + for _, item := range val { + if jh, ok := item.(map[string]interface{}); ok { + jhl = append(jhl, jh) + } + } + return jhl + } + + return nil +} + +// Gets slice of interface{} +func (j JsonHelper) List(n string) []interface{} { + v := j.Get(n) + switch val := v.(type) { + case []string: + il := make([]interface{}, len(val)) + for i, val := range val { + il[i] = val + } + return il + case []interface{}: + return val + } + return nil +} + +func (j JsonHelper) String(n string) string { + if v := j.Get(n); v != nil { + val, _ := CoerceString(v) + return val + } + return "" +} +func (j JsonHelper) Strings(n string) []string { + if v := j.Get(n); v != nil { + //Debugf("Strings(%s) => %T %#v", n, v, v) + switch val := v.(type) { + case string: + return strings.Split(val, ",") + case []string: + //Debug("type []string") + return val + case []interface{}: + //Debug("Kind = []interface{} n=", n, " v=", v) + sva := make([]string, 0) + for _, av := range val { + switch aval := av.(type) { + case string: + sva = append(sva, aval) + default: + //Warnf("Kind ? 
%T v=%v", aval, aval) + } + } + return sva + default: + return []string{j.String(n)} + } + } + return nil +} +func (j JsonHelper) Ints(n string) []int { + v := j.Get(n) + if v == nil { + return nil + } + if sl, isSlice := v.([]interface{}); isSlice { + iva := make([]int, 0) + for _, av := range sl { + avAsInt, ok := valToInt(av) + if ok { + iva = append(iva, avAsInt) + } + } + return iva + } + return nil +} +func (j JsonHelper) StringSafe(n string) (string, bool) { + v := j.Get(n) + if v != nil { + if s, ok := v.(string); ok { + return s, ok + } + } + return "", false +} + +func (j JsonHelper) Int(n string) int { + i, ok := j.IntSafe(n) + if !ok { + return -1 + } + return i +} + +func (j JsonHelper) IntSafe(n string) (int, bool) { + v := j.Get(n) + return valToInt(v) +} + +func (j JsonHelper) Int64(n string) int64 { + i64, ok := j.Int64Safe(n) + if !ok { + return -1 + } + return i64 +} + +func (j JsonHelper) Int64Safe(n string) (int64, bool) { + v := j.Get(n) + return valToInt64(v) +} + +func (j JsonHelper) Float64(n string) float64 { + v := j.Get(n) + f64, err := CoerceFloat(v) + if err != nil { + return math.NaN() + } + return f64 +} + +func (j JsonHelper) Float64Safe(n string) (float64, bool) { + v := j.Get(n) + if v == nil { + return math.NaN(), true + } + fv, err := CoerceFloat(v) + if err != nil { + return math.NaN(), false + } + return fv, true +} + +func (j JsonHelper) Uint64(n string) uint64 { + v := j.Get(n) + if v != nil { + return CoerceUintShort(v) + } + return 0 +} + +func (j JsonHelper) Uint64Safe(n string) (uint64, bool) { + v := j.Get(n) + if v != nil { + if uv, err := CoerceUint(v); err == nil { + return uv, true + } + } + return 0, false +} + +func (j JsonHelper) BoolSafe(n string) (val bool, ok bool) { + v := j.Get(n) + if v != nil { + switch v.(type) { + case bool: + return v.(bool), true + case string: + if s := v.(string); len(s) > 0 { + if b, err := strconv.ParseBool(s); err == nil { + return b, true + } + } + } + } + return false, false +} + +func (j JsonHelper) Bool(n string) bool { + val, ok := j.BoolSafe(n) + if !ok { + return false + } + + return val +} + +func (j JsonHelper) Map(n string) map[string]interface{} { + v := j.Get(n) + if v == nil { + return nil + } + m, ok := v.(map[string]interface{}) + if !ok { + return nil + } + return m +} + +func (j JsonHelper) MapSafe(n string) (map[string]interface{}, bool) { + v := j.Get(n) + if v == nil { + return nil, false + } + m, ok := v.(map[string]interface{}) + if !ok { + return nil, false + } + return m, true +} + +func (j JsonHelper) PrettyJson() []byte { + jsonPretty, _ := json.MarshalIndent(j, " ", " ") + return jsonPretty +} +func (j JsonHelper) Keys() []string { + keys := make([]string, 0) + for key := range j { + keys = append(keys, key) + } + return keys +} +func (j JsonHelper) HasKey(name string) bool { + if val := j.Get(name); val != nil { + return true + } + return false +} + +// GobDecode overwrites the receiver, which must be a pointer, +// with the value represented by the byte slice, which was written +// by GobEncode, usually for the same concrete type. +// GobDecode([]byte) error +func (j *JsonHelper) GobDecode(data []byte) error { + var mv map[string]interface{} + if err := json.Unmarshal(data, &mv); err != nil { + return err + } + *j = JsonHelper(mv) + return nil +} +func (j *JsonHelper) GobEncode() ([]byte, error) { + by, err := json.Marshal(j) + return by, err +} + +// The following consts are from http://code.google.com/p/go-bit/ (Apache licensed). 
It
+// lets us figure out how wide go ints are, and determine their max and min values.
+
+// Note the use of << to create an untyped constant.
+const bitsPerWord = 32 << uint(^uint(0)>>63)
+
+// Implementation-specific size of int and uint in bits.
+const BitsPerWord = bitsPerWord // either 32 or 64
+
+// Implementation-specific integer limit values.
+const (
+	MaxInt  = 1<<(BitsPerWord-1) - 1 // either 1<<31 - 1 or 1<<63 - 1
+	MinInt  = -MaxInt - 1            // either -1 << 31 or -1 << 63
+	MaxUint = 1<<BitsPerWord - 1     // either 1<<32 - 1 or 1<<64 - 1
+)
+
+// flattenJsonMap copies each key/value in jm into uv, prefixing nested keys.
+func flattenJsonMap(uv url.Values, jm map[string]interface{}, prefix string) error {
+	for key, val := range jm {
+		k := prefix + key
+		if err := flattenJsonValue(uv, val, k); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func flattenJsonValue(uv url.Values, val interface{}, k string) error {
+	switch x := val.(type) {
+	case []string:
+		// sva := make([]string, 0)
+		// if len(sva) > 0 {
+		// 	uv[k] = sva
+		// }
+	case map[string]bool:
+		// what to do?
+		Info("not implemented: [string]bool")
+	case map[string]interface{}:
+		if len(x) > 0 {
+			if err := flattenJsonMap(uv, x, k+"."); err != nil {
+				return err
+			}
+		}
+	case string:
+		uv.Set(k, x)
+	case bool:
+		if x == true {
+			uv.Set(k, "t")
+		} else {
+			uv.Set(k, "f")
+		}
+	case int:
+		uv.Set(k, strconv.FormatInt(int64(x), 10))
+	case int8:
+		uv.Set(k, strconv.FormatInt(int64(x), 10))
+	case int16:
+		uv.Set(k, strconv.FormatInt(int64(x), 10))
+	case int32:
+		uv.Set(k, strconv.FormatInt(int64(x), 10))
+	case int64:
+		uv.Set(k, strconv.FormatInt(x, 10))
+	case uint:
+		uv.Set(k, strconv.FormatUint(uint64(x), 10))
+	case uint8:
+		uv.Set(k, strconv.FormatUint(uint64(x), 10))
+	case uint16:
+		uv.Set(k, strconv.FormatUint(uint64(x), 10))
+	case uint32:
+		uv.Set(k, strconv.FormatUint(uint64(x), 10))
+	case uint64:
+		uv.Set(k, strconv.FormatUint(x, 10))
+	case float32:
+		uv.Set(k, strconv.FormatFloat(float64(x), 'f', -1, 64))
+	case float64:
+		uv.Set(k, strconv.FormatFloat(x, 'f', -1, 64))
+	default:
+		// what types don't we support?
+		// []interface{}
+	}
+	return nil
+}
diff --git a/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper_test.go b/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper_test.go
new file mode 100644
index 000000000..1d1e05b9d
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper_test.go
@@ -0,0 +1,221 @@
+package gou
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"math"
+	"strings"
+	"testing"
+
+	. 
"github.com/araddon/gou/goutest" + "github.com/bmizerany/assert" +) + +// go test -bench=".*" +// go test -run="(Util)" + +var ( + jh JsonHelper +) + +func init() { + SetupLogging("debug") + SetColorOutput() + //SetLogger(log.New(os.Stderr, "", log.Ltime|log.Lshortfile), "debug") + // create test data + json.Unmarshal([]byte(`{ + "name":"aaron", + "nullstring":null, + "ints":[1,2,3,4], + "int":1, + "intstr":"1", + "int64":1234567890, + "float64":123.456, + "float64str":"123.456", + "float64null": null, + "MaxSize" : 1048576, + "strings":["string1"], + "stringscsv":"string1,string2", + "nested":{ + "nest":"string2", + "strings":["string1"], + "int":2, + "list":["value"], + "nest2":{ + "test":"good" + } + }, + "nested2":[ + {"sub":2} + ], + "period.name":"value" + }`), &jh) +} + +func TestJsonRawWriter(t *testing.T) { + var buf bytes.Buffer + buf.WriteString(`"hello"`) + raw := json.RawMessage(buf.Bytes()) + bya, _ := json.Marshal(&buf) + Debug(string(bya)) + bya, _ = json.Marshal(&raw) + Debug(string(bya)) + + /* + bya, err := json.Marshal(buf) + Assert(string(bya) == `"hello"`, t, "Should be hello but was %s", string(bya)) + Debug(string(buf.Bytes()), err) + var jrw JsonRawWriter + jrw.WriteString(`"hello"`) + Debug(jrw.Raw()) + bya, err = json.Marshal(jrw.Raw()) + Assert(string(bya) == `"hello"`, t, "Should be hello but was %s", string(bya)) + Debug(string(jrw.Bytes()), err) + */ +} + +func TestJsonHelper(t *testing.T) { + + Assert(jh.String("name") == "aaron", t, "should get 'aaron' %s", jh.String("name")) + Assert(jh.String("nullstring") == "", t, "should get '' %s", jh.String("nullstring")) + + Assert(jh.Int("int") == 1, t, "get int ") + Assert(jh.Int("ints[0]") == 1, t, "get int from array %d", jh.Int("ints[0]")) + Assert(jh.Int("ints[2]") == 3, t, "get int from array %d", jh.Int("ints[0]")) + Assert(len(jh.Ints("ints")) == 4, t, "get int array %v", jh.Ints("ints")) + Assert(jh.Int64("int64") == 1234567890, t, "get int") + Assert(jh.Int("nested.int") == 2, t, "get int") + Assert(jh.String("nested.nest") == "string2", t, "should get string %s", jh.String("nested.nest")) + Assert(jh.String("nested.nest2.test") == "good", t, "should get string %s", jh.String("nested.nest2.test")) + Assert(jh.String("nested.list[0]") == "value", t, "get string from array") + Assert(jh.Int("nested2[0].sub") == 2, t, "get int from obj in array %d", jh.Int("nested2[0].sub")) + + Assert(jh.Int("MaxSize") == 1048576, t, "get int, test capitalization? 
") + sl := jh.Strings("strings") + Assert(len(sl) == 1 && sl[0] == "string1", t, "get strings ") + sl = jh.Strings("stringscsv") + Assert(len(sl) == 2 && sl[0] == "string1", t, "get strings ") + + i64, ok := jh.Int64Safe("int64") + Assert(ok, t, "int64safe ok") + Assert(i64 == 1234567890, t, "int64safe value") + + u64, ok := jh.Uint64Safe("int64") + Assert(ok, t, "uint64safe ok") + Assert(u64 == 1234567890, t, "int64safe value") + _, ok = jh.Uint64Safe("notexistent") + assert.Tf(t, !ok, "should not be ok") + _, ok = jh.Uint64Safe("name") + assert.Tf(t, !ok, "should not be ok") + + i, ok := jh.IntSafe("int") + Assert(ok, t, "intsafe ok") + Assert(i == 1, t, "intsafe value") + + l := jh.List("nested2") + Assert(len(l) == 1, t, "get list") + + fv, ok := jh.Float64Safe("name") + assert.Tf(t, !ok, "floatsafe not ok") + fv, ok = jh.Float64Safe("float64") + assert.Tf(t, ok, "floatsafe ok") + assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) + fv = jh.Float64("float64") + assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) + fv, ok = jh.Float64Safe("float64str") + assert.Tf(t, ok, "floatsafe ok") + assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) + fv = jh.Float64("float64str") + assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) + fv, ok = jh.Float64Safe("float64null") + assert.Tf(t, ok, "float64null ok") + assert.Tf(t, math.IsNaN(fv), "float64null expected Nan but got %v", fv) + fv = jh.Float64("float64null") + assert.Tf(t, math.IsNaN(fv), "float64null expected Nan but got %v", fv) + + jhm := jh.Helpers("nested2") + Assert(len(jhm) == 1, t, "get list of helpers") + Assert(jhm[0].Int("sub") == 2, t, "Should get list of helpers") +} + +func TestJsonInterface(t *testing.T) { + + var jim map[string]JsonInterface + err := json.Unmarshal([]byte(`{ + "nullstring":null, + "string":"string", + "int":22, + "float":22.2, + "floatstr":"22.2", + "intstr":"22" + }`), &jim) + Assert(err == nil, t, "no error:%v ", err) + Assert(jim["nullstring"].StringSh() == "", t, "nullstring: %v", jim["nullstring"]) + Assert(jim["string"].StringSh() == "string", t, "nullstring: %v", jim["string"]) + Assert(jim["int"].IntSh() == 22, t, "int: %v", jim["int"]) + Assert(jim["int"].StringSh() == "22", t, "int->string: %v", jim["int"]) + Assert(jim["int"].FloatSh() == float32(22), t, "int->float: %v", jim["int"]) + Assert(jim["float"].FloatSh() == 22.2, t, "float: %v", jim["float"]) + Assert(jim["float"].StringSh() == "22.2", t, "float->string: %v", jim["float"]) + Assert(jim["float"].IntSh() == 22, t, "float->int: %v", jim["float"]) + Assert(jim["intstr"].IntSh() == 22, t, "intstr: %v", jim["intstr"]) + Assert(jim["intstr"].FloatSh() == float32(22), t, "intstr->float: %v", jim["intstr"]) +} + +func TestJsonCoercion(t *testing.T) { + assert.Tf(t, jh.Int("intstr") == 1, "get string as int %s", jh.String("intstr")) + assert.Tf(t, jh.String("int") == "1", "get int as string %s", jh.String("int")) + assert.Tf(t, jh.Int("notint") == -1, "get non existent int = 0??? ") +} + +func TestJsonPathNotation(t *testing.T) { + // Now lets test xpath type syntax + assert.Tf(t, jh.Int("/MaxSize") == 1048576, "get int, test capitalization? 
") + assert.Tf(t, jh.String("/nested/nest") == "string2", "should get string %s", jh.String("/nested/nest")) + assert.Tf(t, jh.String("/nested/list[0]") == "value", "get string from array") + // note this one has period in name + assert.Tf(t, jh.String("/period.name") == "value", "test period in name ") +} + +func TestFromReader(t *testing.T) { + raw := `{"testing": 123}` + reader := strings.NewReader(raw) + jh, err := NewJsonHelperReader(reader) + assert.Tf(t, err == nil, "Unexpected error decoding json: %s", err) + assert.Tf(t, jh.Int("testing") == 123, "Unexpected value in json: %d", jh.Int("testing")) +} + +func TestJsonHelperGobEncoding(t *testing.T) { + raw := `{"testing": 123,"name":"bob & more"}` + reader := strings.NewReader(raw) + jh, err := NewJsonHelperReader(reader) + assert.Tf(t, err == nil, "Unexpected error decoding gob: %s", err) + assert.Tf(t, jh.Int("testing") == 123, "Unexpected value in gob: %d", jh.Int("testing")) + var buf bytes.Buffer + err = gob.NewEncoder(&buf).Encode(&jh) + assert.T(t, err == nil, err) + + var jhNew JsonHelper + err = gob.NewDecoder(&buf).Decode(&jhNew) + assert.T(t, err == nil, err) + assert.Tf(t, jhNew.Int("testing") == 123, "Unexpected value in gob: %d", jhNew.Int("testing")) + assert.Tf(t, jhNew.String("name") == "bob & more", "Unexpected value in gob: %d", jhNew.String("name")) + + buf2 := bytes.Buffer{} + gt := GobTest{"Hello", jh} + err = gob.NewEncoder(&buf2).Encode(>) + assert.T(t, err == nil, err) + + var gt2 GobTest + err = gob.NewDecoder(&buf2).Decode(>2) + assert.T(t, err == nil, err) + assert.Tf(t, gt2.Name == "Hello", "Unexpected value in gob: %d", gt2.Name) + assert.Tf(t, gt2.Data.Int("testing") == 123, "Unexpected value in gob: %d", gt2.Data.Int("testing")) + assert.Tf(t, gt2.Data.String("name") == "bob & more", "Unexpected value in gob: %d", gt2.Data.String("name")) +} + +type GobTest struct { + Name string + Data JsonHelper +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/log.go b/services/templeton/vendor/src/github.com/araddon/gou/log.go new file mode 100644 index 000000000..2a01805f7 --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/log.go @@ -0,0 +1,412 @@ +package gou + +import ( + "fmt" + "log" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + NOLOGGING = -1 + FATAL = 0 + ERROR = 1 + WARN = 2 + INFO = 3 + DEBUG = 4 +) + +/* +https://github.com/mewkiz/pkg/tree/master/term +RED = '\033[0;1;31m' +GREEN = '\033[0;1;32m' +YELLOW = '\033[0;1;33m' +BLUE = '\033[0;1;34m' +MAGENTA = '\033[0;1;35m' +CYAN = '\033[0;1;36m' +WHITE = '\033[0;1;37m' +DARK_MAGENTA = '\033[0;35m' +ANSI_RESET = '\033[0m' +LogColor = map[int]string{FATAL: "\033[0m\033[37m", + ERROR: "\033[0m\033[31m", + WARN: "\033[0m\033[33m", + INFO: "\033[0m\033[32m", + DEBUG: "\033[0m\033[34m"} + +\e]PFdedede +*/ + +var ( + LogLevel int = ERROR + EMPTY struct{} + ErrLogLevel int = ERROR + logger *log.Logger + loggerErr *log.Logger + LogColor = map[int]string{FATAL: "\033[0m\033[37m", + ERROR: "\033[0m\033[31m", + WARN: "\033[0m\033[33m", + INFO: "\033[0m\033[35m", + DEBUG: "\033[0m\033[34m"} + LogPrefix = map[int]string{ + FATAL: "[FATAL] ", + ERROR: "[ERROR] ", + WARN: "[WARN] ", + INFO: "[INFO] ", + DEBUG: "[DEBUG] ", + } + escapeNewlines bool = false + postFix = "" //\033[0m + LogLevelWords map[string]int = map[string]int{"fatal": 0, "error": 1, "warn": 2, "info": 3, "debug": 4, "none": -1} + logThrottles = make(map[string]*Throttler) + throttleMu sync.Mutex +) + +// Setup default logging to Stderr, equivalent 
to:
+//
+// gou.SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile|log.Lmicroseconds), "debug")
+func SetupLogging(lvl string) {
+ SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile|log.Lmicroseconds), strings.ToLower(lvl))
+}
+
+// Setup default logging to Stderr, equivalent to:
+//
+// gou.SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Llongfile|log.Lmicroseconds), level)
+func SetupLoggingLong(lvl string) {
+ SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Llongfile|log.Lmicroseconds), strings.ToLower(lvl))
+}
+
+// Setup colorized output if this is a terminal
+func SetColorIfTerminal() {
+ if IsTerminal() {
+ SetColorOutput()
+ }
+}
+
+// Setup colorized output
+func SetColorOutput() {
+ for lvl, color := range LogColor {
+ LogPrefix[lvl] = color
+ }
+ postFix = "\033[0m"
+}
+
+// Set whether to escape newline characters in log messages
+func SetEscapeNewlines(en bool) {
+ escapeNewlines = en
+}
+
+// Setup default log output to go to /dev/null
+//
+// log.SetOutput(new(DevNull))
+func DiscardStandardLogger() {
+ log.SetOutput(new(DevNull))
+}
+
+// You can set a logger and a log level; the most common usage is:
+//
+// gou.SetLogger(log.New(os.Stdout, "", log.LstdFlags), "debug")
+//
+// log levels: debug, info, warn, error, fatal
+// Note that you can also set a separate error log level
+func SetLogger(l *log.Logger, logLevel string) {
+ logger = l
+ LogLevelSet(logLevel)
+}
+func GetLogger() *log.Logger {
+ return logger
+}
+
+// You can set a separate logger and log level for errors. This assumes
+// you are logging to Stderr (separate from stdout above), allowing you to separate
+// debug & info logging from errors
+//
+// gou.SetLogger(log.New(os.Stderr, "", log.LstdFlags), "debug")
+//
+// log levels: debug, info, warn, error, fatal
+func SetErrLogger(l *log.Logger, logLevel string) {
+ loggerErr = l
+ if lvl, ok := LogLevelWords[logLevel]; ok {
+ ErrLogLevel = lvl
+ }
+}
+func GetErrLogger() *log.Logger {
+ return loggerErr
+}
+
+// Sets the log level from a string
+func LogLevelSet(levelWord string) {
+ if lvl, ok := LogLevelWords[levelWord]; ok {
+ LogLevel = lvl
+ }
+}
+
+// Log at debug level
+func Debug(v ...interface{}) {
+ if LogLevel >= 4 {
+ DoLog(3, DEBUG, fmt.Sprint(v...))
+ }
+}
+
+// Debug log formatted
+func Debugf(format string, v ...interface{}) {
+ if LogLevel >= 4 {
+ DoLog(3, DEBUG, fmt.Sprintf(format, v...))
+ }
+}
+
+// Log at info level
+func Info(v ...interface{}) {
+ if LogLevel >= 3 {
+ DoLog(3, INFO, fmt.Sprint(v...))
+ }
+}
+
+// Info log formatted
+func Infof(format string, v ...interface{}) {
+ if LogLevel >= 3 {
+ DoLog(3, INFO, fmt.Sprintf(format, v...))
+ }
+}
+
+// Log at warn level
+func Warn(v ...interface{}) {
+ if LogLevel >= 2 {
+ DoLog(3, WARN, fmt.Sprint(v...))
+ }
+}
+
+// Warn log formatted
+func Warnf(format string, v ...interface{}) {
+ if LogLevel >= 2 {
+ DoLog(3, WARN, fmt.Sprintf(format, v...))
+ }
+}
+
+// Log at error level
+func Error(v ...interface{}) {
+ if LogLevel >= 1 {
+ DoLog(3, ERROR, fmt.Sprint(v...))
+ }
+}
+
+// Error log formatted
+func Errorf(format string, v ...interface{}) {
+ if LogLevel >= 1 {
+ DoLog(3, ERROR, fmt.Sprintf(format, v...))
+ }
+}
+
+// Log this error, and return error object
+func LogErrorf(format string, v ...interface{}) error {
+ err := fmt.Errorf(format, v...)
+ if LogLevel >= 1 { + DoLog(3, ERROR, err.Error()) + } + return err +} + +// Log to logger if setup +// Log(ERROR, "message") +func Log(logLvl int, v ...interface{}) { + if LogLevel >= logLvl { + DoLog(3, logLvl, fmt.Sprint(v...)) + } +} + +// Log to logger if setup, grab a stack trace and add that as well +// +// u.LogTracef(u.ERROR, "message %s", varx) +// +func LogTracef(logLvl int, format string, v ...interface{}) { + if LogLevel >= logLvl { + // grab a stack trace + stackBuf := make([]byte, 6000) + stackBufLen := runtime.Stack(stackBuf, false) + stackTraceStr := string(stackBuf[0:stackBufLen]) + parts := strings.Split(stackTraceStr, "\n") + if len(parts) > 1 { + v = append(v, strings.Join(parts[3:], "\n")) + } + DoLog(3, logLvl, fmt.Sprintf(format+"\n%v", v...)) + } +} + +// Log to logger if setup, grab a stack trace and add that as well +// +// u.LogTracef(u.ERROR, "message %s", varx) +// +func LogTraceDf(logLvl, lineCt int, format string, v ...interface{}) { + if LogLevel >= logLvl { + // grab a stack trace + stackBuf := make([]byte, 6000) + stackBufLen := runtime.Stack(stackBuf, false) + stackTraceStr := string(stackBuf[0:stackBufLen]) + parts := strings.Split(stackTraceStr, "\n") + if len(parts) > 1 { + if (len(parts) - 3) > lineCt { + parts = parts[3 : 3+lineCt] + parts2 := make([]string, 0, len(parts)/2) + for i := 1; i < len(parts); i = i + 2 { + parts2 = append(parts2, parts[i]) + } + v = append(v, strings.Join(parts2, "\n")) + //v = append(v, strings.Join(parts[3:3+lineCt], "\n")) + } else { + v = append(v, strings.Join(parts[3:], "\n")) + } + } + DoLog(3, logLvl, fmt.Sprintf(format+"\n%v", v...)) + } +} + +// Throttle logging based on key, such that key would never occur more than +// @limit times per hour +// +// LogThrottleKey(u.ERROR, 1,"error_that_happens_a_lot" "message %s", varx) +// +func LogThrottleKey(logLvl, limit int, key, format string, v ...interface{}) { + if LogLevel >= logLvl { + throttleMu.Lock() + th, ok := logThrottles[key] + if !ok { + th = NewThrottler(limit, 3600*time.Second) + logThrottles[key] = th + } + if th.Throttle() { + throttleMu.Unlock() + return + } + throttleMu.Unlock() + DoLog(3, logLvl, fmt.Sprintf(format, v...)) + } +} + +// Throttle logging based on @format as a key, such that key would never occur more than +// @limit times per hour +// +// LogThrottle(u.ERROR, 1, "message %s", varx) +// +func LogThrottle(logLvl, limit int, format string, v ...interface{}) { + if LogLevel >= logLvl { + throttleMu.Lock() + th, ok := logThrottles[format] + if !ok { + th = NewThrottler(limit, 3600*time.Second) + logThrottles[format] = th + } + if th.Throttle() { + throttleMu.Unlock() + return + } + throttleMu.Unlock() + DoLog(3, logLvl, fmt.Sprintf(format, v...)) + } +} + +// Throttle logging based on @format as a key, such that key would never occur more than +// @limit times per hour +// +// LogThrottleD(5, u.ERROR, 1, "message %s", varx) +// +func LogThrottleD(depth, logLvl, limit int, format string, v ...interface{}) { + if LogLevel >= logLvl { + throttleMu.Lock() + th, ok := logThrottles[format] + if !ok { + th = NewThrottler(limit, 3600*time.Second) + logThrottles[format] = th + } + if th.Throttle() { + throttleMu.Unlock() + return + } + throttleMu.Unlock() + DoLog(depth, logLvl, fmt.Sprintf(format, v...)) + } +} + +// Log to logger if setup +// Logf(ERROR, "message %d", 20) +func Logf(logLvl int, format string, v ...interface{}) { + if LogLevel >= logLvl { + DoLog(3, logLvl, fmt.Sprintf(format, v...)) + } +} + +// Log to logger if setup +// 
LogP(ERROR, "prefix", "message", anyItems, youWant) +func LogP(logLvl int, prefix string, v ...interface{}) { + if ErrLogLevel >= logLvl && loggerErr != nil { + loggerErr.Output(3, prefix+LogPrefix[logLvl]+fmt.Sprint(v...)+postFix) + } else if LogLevel >= logLvl && logger != nil { + logger.Output(3, prefix+LogPrefix[logLvl]+fmt.Sprint(v...)+postFix) + } +} + +// Log to logger if setup with a prefix +// LogPf(ERROR, "prefix", "formatString %s %v", anyItems, youWant) +func LogPf(logLvl int, prefix string, format string, v ...interface{}) { + if ErrLogLevel >= logLvl && loggerErr != nil { + loggerErr.Output(3, prefix+LogPrefix[logLvl]+fmt.Sprintf(format, v...)+postFix) + } else if LogLevel >= logLvl && logger != nil { + logger.Output(3, prefix+LogPrefix[logLvl]+fmt.Sprintf(format, v...)+postFix) + } +} + +// When you want to use the log short filename flag, and want to use +// the lower level logging functions (say from an *Assert* type function) +// you need to modify the stack depth: +// +// func init() {} +// SetLogger(log.New(os.Stderr, "", log.Ltime|log.Lshortfile|log.Lmicroseconds), lvl) +// } +// +// func assert(t *testing.T, myData) { +// // we want log line to show line that called this assert, not this line +// LogD(5, DEBUG, v...) +// } +func LogD(depth int, logLvl int, v ...interface{}) { + if LogLevel >= logLvl { + DoLog(depth, logLvl, fmt.Sprint(v...)) + } +} + +// Low level log with depth , level, message and logger +func DoLog(depth, logLvl int, msg string) { + if escapeNewlines { + msg = EscapeNewlines(msg) + } + if ErrLogLevel >= logLvl && loggerErr != nil { + loggerErr.Output(depth, LogPrefix[logLvl]+msg+postFix) + } else if LogLevel >= logLvl && logger != nil { + logger.Output(depth, LogPrefix[logLvl]+msg+postFix) + } +} + +type winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + _TIOCGWINSZ = 0x5413 // OSX 1074295912 +) + +//http://play.golang.org/p/5LIA41Iqfp +// Dummy discard, satisfies io.Writer without importing io or os. +type DevNull struct{} + +func (DevNull) Write(p []byte) (int, error) { + return len(p), nil +} + +//Replace standard newline characters with escaped newlines so long msgs will +//remain one line. +func EscapeNewlines(str string) string { + return strings.Replace(str, "\n", "\\n", -1) +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/log_unix.go b/services/templeton/vendor/src/github.com/araddon/gou/log_unix.go new file mode 100644 index 000000000..6d4851cf5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/log_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package gou + +import ( + "syscall" + "unsafe" +) + +// Determine is this process is running in a Terminal or not? +func IsTerminal() bool { + ws := &winsize{} + isTerm := true + defer func() { + if r := recover(); r != nil { + isTerm = false + } + }() + // This blows up on windows + retCode, _, _ := syscall.Syscall(syscall.SYS_IOCTL, + uintptr(syscall.Stdin), + uintptr(_TIOCGWINSZ), + uintptr(unsafe.Pointer(ws))) + + if int(retCode) == -1 { + return false + } + return isTerm +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/log_windows.go b/services/templeton/vendor/src/github.com/araddon/gou/log_windows.go new file mode 100644 index 000000000..12713de91 --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/log_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package gou + +// Determine is this process is running in a Terminal or not? 
+func IsTerminal() bool { + return false // TODO Needs correct implementation on Windows +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/testutil.go b/services/templeton/vendor/src/github.com/araddon/gou/testutil.go new file mode 100644 index 000000000..fe92c6f1f --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/testutil.go @@ -0,0 +1,75 @@ +package gou + +import ( + "os" + "time" +) + +var ( + //finished chan bool + lastTest time.Time = time.Now() + stopper func() = func() {} +) + +// Wait for condition (defined by func) to be true +// this is mostly for testing, but a utility to +// create a ticker checking back every 100 ms to see +// if something (the supplied check func) is done +// +// WaitFor(func() bool { +// return ctr.Ct == 0 +// },10) +// timeout (in seconds) is the last arg +func WaitFor(check func() bool, timeoutSecs int) { + timer := time.NewTicker(100 * time.Millisecond) + tryct := 0 + for range timer.C { + if check() { + timer.Stop() + break + } + if tryct >= timeoutSecs*10 { + timer.Stop() + break + } + tryct++ + } +} + +// Use this in combo with StopCheck() for test functions that must start +// processes such as +func SetStopper(f func()) { + stopper = f +} + +// take two floats, compare, need to be within 2% +func CloseEnuf(a, b float64) bool { + c := a / b + if c > .98 && c < 1.02 { + return true + } + return false +} + +// take two ints, compare, need to be within 5% +func CloseInt(a, b int) bool { + c := float64(a) / float64(b) + if c >= .95 && c <= 1.05 { + return true + } + return false +} + +func StartTest() { + lastTest = time.Now() +} + +func StopCheck() { + t := time.Now() + if lastTest.Add(time.Millisecond*1000).UnixNano() < t.UnixNano() { + Log(INFO, "Stopping Test ", lastTest.Unix()) + //finished <- true + stopper() + os.Exit(0) + } +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/throttle.go b/services/templeton/vendor/src/github.com/araddon/gou/throttle.go new file mode 100644 index 000000000..8d1b3c34d --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/throttle.go @@ -0,0 +1,56 @@ +package gou + +import ( + "time" +) + +type Throttler struct { + + // Limit to this events/per + maxPer float64 + per float64 + + // Last Event + last time.Time + + // How many events are allowed left to happen? + // Starts at limit, decrements down + allowance float64 +} + +// new Throttler that will tell you to limit or not based +// on given @max events @per duration +func NewThrottler(max int, per time.Duration) *Throttler { + return &Throttler{ + maxPer: float64(max), + allowance: float64(max), + last: time.Now(), + per: per.Seconds(), + } +} + +// Should we limit this because we are above rate? 
+func (r *Throttler) Throttle() bool {
+
+ if r.maxPer == 0 {
+ return false
+ }
+
+ // http://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm
+ now := time.Now()
+ elapsed := float64(now.Sub(r.last).Nanoseconds()) / 1e9 // seconds
+ r.last = now
+ r.allowance += elapsed * (r.maxPer / r.per)
+
+ //Infof("maxRate: %v cur: %v elapsed:%-6.6f incr: %v", r.maxPer, int(r.allowance), elapsed, elapsed*float64(r.maxPer))
+ if r.allowance > r.maxPer {
+ r.allowance = r.maxPer
+ }
+
+ if r.allowance <= 1.0 {
+ return true // do throttle/limit
+ }
+
+ r.allowance -= 1.0
+ return false // don't throttle
+}
diff --git a/services/templeton/vendor/src/github.com/araddon/gou/throttle_test.go b/services/templeton/vendor/src/github.com/araddon/gou/throttle_test.go
new file mode 100644
index 000000000..7c654d47b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/araddon/gou/throttle_test.go
@@ -0,0 +1,30 @@
+package gou
+
+import (
+ "testing"
+ "time"
+
+ "github.com/bmizerany/assert"
+)
+
+func TestThrottler(t *testing.T) {
+ th := NewThrottler(10, 10*time.Second)
+ for i := 0; i < 10; i++ {
+ assert.Tf(t, th.Throttle() == false, "Should not throttle %v", i)
+ time.Sleep(time.Millisecond * 10)
+ }
+ throttled := 0
+ th = NewThrottler(10, 1*time.Second)
+ // We are going to loop 20 times, first 10 should make it, next 10 throttled
+ for i := 0; i < 20; i++ {
+ LogThrottleKey(WARN, 10, "throttle", "hello %v", i)
+ if th.Throttle() {
+ throttled += 1
+ }
+ }
+ assert.Tf(t, throttled == 10, "Should throttle 10 of 20 requests: %v", throttled)
+ // Now sleep for 1 second so that we should
+ // no longer be throttled
+ time.Sleep(time.Second * 1)
+ assert.Tf(t, th.Throttle() == false, "We should not have been throttled")
+}
diff --git a/services/templeton/vendor/src/github.com/araddon/gou/uid.go b/services/templeton/vendor/src/github.com/araddon/gou/uid.go
new file mode 100644
index 000000000..6afa66ad5
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/araddon/gou/uid.go
@@ -0,0 +1,94 @@
+package gou
+
+import (
+ "crypto/md5"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ //2013-2-3
+ ourEpoch = uint32(1359931242)
+)
+
+func init() {
+ initHostPidId()
+}
+
+/*
+Special thanks to ideas from Mgo and Noeqd; this is somewhat in between them
+https://github.com/bmizerany/noeqd
+
+It is a roughly sortable UID, but uses machine specific info (host, processid)
+as part of the uid so each machine *will* have unique ids
+
+The host+processid is 3 bytes
+
+*/
+
+// uidCounter is atomically incremented each time we create
+// a new uid within a given ms time window
+var uidCounter uint32 = 0
+
+// hostPidId stores the generated hostPid
+var hostPidId []byte
+
+// initHostPidId generates a machine-process specific id by using hostname
+// and processid
+func initHostPidId() {
+ var sum [4]byte
+ hostB := sum[:]
+ host, err := os.Hostname()
+ if err != nil {
+ // if we cannot get hostname, just use a random set of bytes
+ _, err2 := io.ReadFull(rand.Reader, hostB)
+ if err2 != nil {
+ panic(fmt.Errorf("cannot get hostname: %v; %v", err, err2))
+ }
+ } else {
+ hw := md5.New()
+ hw.Write([]byte(host))
+ copy(hostB, hw.Sum(nil))
+ }
+ pid := os.Getpid()
+ hostI := binary.BigEndian.Uint32(hostB)
+ uid := uint32(pid) + uint32(hostI)
+ binary.BigEndian.PutUint32(hostB, uid)
+ b := make([]byte, 4)
+ binary.BigEndian.PutUint32(b, uid)
+ hostPidId = b[:]
+}
+
+// uid is a 64 bit int uid
+type Uid uint64
+
+// Create a new
uint64 unique id +func NewUid() uint64 { + b := make([]byte, 8) + ts := uint32(time.Now().Unix()) - ourEpoch + + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(b, ts) + //Debugf("ts=%v b=%v", ts, b) + // first 3 bytes of host/pid + b[4] = hostPidId[2] + b[5] = hostPidId[3] + b[6] = hostPidId[3] + // Increment, 2 bytes, big endian + i := atomic.AddUint32(&uidCounter, 1) + //b[6] = byte(i >> 8) + b[7] = byte(i) + ui := binary.BigEndian.Uint64(b) + //Debugf("ui=%d b=%v ", ui, b) + return ui +} + +func (u *Uid) String() string { + return strconv.FormatUint(uint64(*u), 10) +} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/uid_test.go b/services/templeton/vendor/src/github.com/araddon/gou/uid_test.go new file mode 100644 index 000000000..7896e7c74 --- /dev/null +++ b/services/templeton/vendor/src/github.com/araddon/gou/uid_test.go @@ -0,0 +1,11 @@ +package gou + +import ( + "testing" +) + +func TestUid(t *testing.T) { + u := NewUid() + Debug(u) + Debug(NewUid()) +} diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/LICENSE b/services/templeton/vendor/src/github.com/bitly/go-hostpool/LICENSE new file mode 100644 index 000000000..f24db89c4 --- /dev/null +++ b/services/templeton/vendor/src/github.com/bitly/go-hostpool/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Bitly + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/README.md b/services/templeton/vendor/src/github.com/bitly/go-hostpool/README.md new file mode 100644 index 000000000..7f4437277 --- /dev/null +++ b/services/templeton/vendor/src/github.com/bitly/go-hostpool/README.md @@ -0,0 +1,17 @@ +go-hostpool +=========== + +A Go package to intelligently and flexibly pool among multiple hosts from your Go application. +Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are +avoided. 
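+
+The upstream usage example below uses the epsilon greedy pool. For the plain
+round-robin mode, a minimal sketch (not from the upstream README; it assumes
+only the `New`, `Get`, and `Mark` API defined in hostpool.go later in this
+patch) would look like:
+
+```go
+hp := hostpool.New([]string{"a", "b", "c"})
+resp := hp.Get()
+// ... make a request against resp.Host() ...
+resp.Mark(nil) // pass the request error instead, so a failing host is marked dead and retried later
+```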
+Usage example:
+
+```go
+hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{})
+hostResponse := hp.Get()
+hostname := hostResponse.Host()
+var err error // (make a request with hostname and set err)
+hostResponse.Mark(err)
+```
+
+View more detailed documentation on [godoc.org](http://godoc.org/github.com/bitly/go-hostpool)
diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_greedy.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_greedy.go
new file mode 100644
index 000000000..6976ba711
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_greedy.go
@@ -0,0 +1,205 @@
+package hostpool
+
+import (
+ "log"
+ "math/rand"
+ "time"
+)
+
+type epsilonHostPoolResponse struct {
+ standardHostPoolResponse
+ started time.Time
+ ended time.Time
+}
+
+func (r *epsilonHostPoolResponse) Mark(err error) {
+ r.Do(func() {
+ r.ended = time.Now()
+ doMark(err, r)
+ })
+}
+
+type epsilonGreedyHostPool struct {
+ standardHostPool // TODO - would be nifty if we could embed HostPool and Locker interfaces
+ epsilon float32 // this is our exploration factor
+ decayDuration time.Duration
+ EpsilonValueCalculator // embed the epsilonValueCalculator
+ timer
+ quit chan bool
+}
+
+// Construct an Epsilon Greedy HostPool
+//
+// Epsilon Greedy is an algorithm that allows HostPool not only to track failure state,
+// but also to learn about "better" options in terms of speed, and to pick from available hosts
+// based on how well they perform. This gives a weighted request rate to better
+// performing hosts, while still distributing requests to all hosts (proportionate to their performance).
+// The interface is the same as the standard HostPool, but be sure to mark the HostResponse immediately
+// after executing the request to the host, as that will stop the implicitly running request timer.
+//
+// A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132
+//
+// To compute the weighting scores, we perform a weighted average of recent response times, over the course of
+// `decayDuration`. decayDuration may be set to 0 to use the default value of 5 minutes.
+// We then use the supplied EpsilonValueCalculator to calculate a score from that weighted average response time.
+func NewEpsilonGreedy(hosts []string, decayDuration time.Duration, calc EpsilonValueCalculator) HostPool {
+
+ if decayDuration <= 0 {
+ decayDuration = defaultDecayDuration
+ }
+ stdHP := New(hosts).(*standardHostPool)
+ p := &epsilonGreedyHostPool{
+ standardHostPool: *stdHP,
+ epsilon: float32(initialEpsilon),
+ decayDuration: decayDuration,
+ EpsilonValueCalculator: calc,
+ timer: &realTimer{},
+ quit: make(chan bool),
+ }
+
+ // allocate structures
+ for _, h := range p.hostList {
+ h.epsilonCounts = make([]int64, epsilonBuckets)
+ h.epsilonValues = make([]int64, epsilonBuckets)
+ }
+ go p.epsilonGreedyDecay()
+ return p
+}
+
+func (p *epsilonGreedyHostPool) Close() {
+ // No need to do p.quit <- true as close(p.quit) does the trick.
+ close(p.quit)
+}
+
+func (p *epsilonGreedyHostPool) SetEpsilon(newEpsilon float32) {
+ p.Lock()
+ defer p.Unlock()
+ p.epsilon = newEpsilon
+}
+
+func (p *epsilonGreedyHostPool) epsilonGreedyDecay() {
+ durationPerBucket := p.decayDuration / epsilonBuckets
+ ticker := time.NewTicker(durationPerBucket)
+ for {
+ select {
+ case <-p.quit:
+ ticker.Stop()
+ return
+ case <-ticker.C:
+ p.performEpsilonGreedyDecay()
+ }
+ }
+}
+func (p *epsilonGreedyHostPool) performEpsilonGreedyDecay() {
+ p.Lock()
+ for _, h := range p.hostList {
+ h.epsilonIndex += 1
+ h.epsilonIndex = h.epsilonIndex % epsilonBuckets
+ h.epsilonCounts[h.epsilonIndex] = 0
+ h.epsilonValues[h.epsilonIndex] = 0
+ }
+ p.Unlock()
+}
+
+func (p *epsilonGreedyHostPool) Get() HostPoolResponse {
+ p.Lock()
+ defer p.Unlock()
+ host := p.getEpsilonGreedy()
+ started := time.Now()
+ return &epsilonHostPoolResponse{
+ standardHostPoolResponse: standardHostPoolResponse{host: host, pool: p},
+ started: started,
+ }
+}
+
+func (p *epsilonGreedyHostPool) getEpsilonGreedy() string {
+ var hostToUse *hostEntry
+
+ // this is our exploration phase
+ if rand.Float32() < p.epsilon {
+ p.epsilon = p.epsilon * epsilonDecay
+ if p.epsilon < minEpsilon {
+ p.epsilon = minEpsilon
+ }
+ return p.getRoundRobin()
+ }
+
+ // calculate values for each host in the 0..1 range (but not normalized)
+ var possibleHosts []*hostEntry
+ now := time.Now()
+ var sumValues float64
+ for _, h := range p.hostList {
+ if h.canTryHost(now) {
+ v := h.getWeightedAverageResponseTime()
+ if v > 0 {
+ ev := p.CalcValueFromAvgResponseTime(v)
+ h.epsilonValue = ev
+ sumValues += ev
+ possibleHosts = append(possibleHosts, h)
+ }
+ }
+ }
+
+ if len(possibleHosts) != 0 {
+ // now normalize to the 0..1 range to get a percentage
+ for _, h := range possibleHosts {
+ h.epsilonPercentage = h.epsilonValue / sumValues
+ }
+
+ // do a weighted random choice among hosts
+ ceiling := 0.0
+ pickPercentage := rand.Float64()
+ for _, h := range possibleHosts {
+ ceiling += h.epsilonPercentage
+ if pickPercentage <= ceiling {
+ hostToUse = h
+ break
+ }
+ }
+ }
+
+ if hostToUse == nil {
+ if len(possibleHosts) != 0 {
+ log.Println("Failed to randomly choose a host, Dan loses")
+ }
+ return p.getRoundRobin()
+ }
+
+ if hostToUse.dead {
+ hostToUse.willRetryHost(p.maxRetryInterval)
+ }
+ return hostToUse.host
+}
+
+func (p *epsilonGreedyHostPool) markSuccess(hostR HostPoolResponse) {
+ // first do the base markSuccess - a little redundant with host lookup but cleaner than repeating logic
+ p.standardHostPool.markSuccess(hostR)
+ eHostR, ok := hostR.(*epsilonHostPoolResponse)
+ if !ok {
+ log.Printf("Incorrect type in eps markSuccess!") // TODO reflection to print out offending type
+ return
+ }
+ host := eHostR.host
+ duration := p.between(eHostR.started, eHostR.ended)
+
+ p.Lock()
+ defer p.Unlock()
+ h, ok := p.hosts[host]
+ if !ok {
+ log.Fatalf("host %s not in HostPool %v", host, p.Hosts())
+ }
+ h.epsilonCounts[h.epsilonIndex]++
+ h.epsilonValues[h.epsilonIndex] += int64(duration.Seconds() * 1000)
+}
+
+// --- timer: this just exists for testing
+
+type timer interface {
+ between(time.Time, time.Time) time.Duration
+}
+
+type realTimer struct{}
+
+func (rt *realTimer) between(start time.Time, end time.Time) time.Duration {
+ return end.Sub(start)
+}
diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_value_calculators.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_value_calculators.go
new file mode 100644
index 000000000..9bc3102a9
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_value_calculators.go
@@ -0,0 +1,40 @@
+package hostpool
+
+// --- Value Calculators -----------------
+
+import (
+ "math"
+)
+
+// --- Definitions -----------------------
+
+// Structs implementing this interface are used to convert the average response time for a host
+// into a score that can be used to weight hosts in the epsilon greedy hostpool. Lower response
+// times should yield higher scores (we want to select the faster hosts more often). The default
+// LinearEpsilonValueCalculator just uses the reciprocal of the response time. In practice, any
+// decreasing function from the positive reals to the positive reals should work.
+type EpsilonValueCalculator interface {
+ CalcValueFromAvgResponseTime(float64) float64
+}
+
+type LinearEpsilonValueCalculator struct{}
+type LogEpsilonValueCalculator struct{ LinearEpsilonValueCalculator }
+type PolynomialEpsilonValueCalculator struct {
+ LinearEpsilonValueCalculator
+ Exp float64 // the exponent to which we will raise the value to reweight
+}
+
+// -------- Methods -----------------------
+
+func (c *LinearEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
+ return 1.0 / v
+}
+
+func (c *LogEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
+ // we need to add 1 to v so that this will be defined on all positive floats
+ return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Log(v + 1.0))
+}
+
+func (c *PolynomialEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
+ return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Pow(v, c.Exp))
+}
diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/example_test.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/example_test.go
new file mode 100644
index 000000000..88d0e558c
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/bitly/go-hostpool/example_test.go
@@ -0,0 +1,13 @@
+package hostpool_test
+
+import (
+ "github.com/bitly/go-hostpool"
+)
+
+func ExampleNewEpsilonGreedy() {
+ hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{})
+ hostResponse := hp.Get()
+ hostname := hostResponse.Host()
+ _ = hostname // (make a request with hostname)
+ var err error // (the error from that request)
+ hostResponse.Mark(err)
+}
diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/host_entry.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/host_entry.go
new file mode 100644
index 000000000..dcec9a0b7
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/bitly/go-hostpool/host_entry.go
@@ -0,0 +1,62 @@
+package hostpool
+
+import (
+ "time"
+)
+
+// --- hostEntry - this is due to get upgraded
+
+type hostEntry struct {
+ host string
+ nextRetry time.Time
+ retryCount int16
+ retryDelay time.Duration
+ dead bool
+ epsilonCounts []int64
+ epsilonValues []int64
+ epsilonIndex int
+ epsilonValue float64
+ epsilonPercentage float64
+}
+
+func (h *hostEntry) canTryHost(now time.Time) bool {
+ if !h.dead {
+ return true
+ }
+ if h.nextRetry.Before(now) {
+ return true
+ }
+ return false
+}
+
+func (h *hostEntry) willRetryHost(maxRetryInterval time.Duration) {
+ h.retryCount += 1
+ newDelay := h.retryDelay * 2
+ if newDelay < maxRetryInterval {
+ h.retryDelay = newDelay
+ } else {
+ h.retryDelay = maxRetryInterval
+ }
+ h.nextRetry = time.Now().Add(h.retryDelay)
+}
+
+func (h *hostEntry) getWeightedAverageResponseTime() float64 {
+ var value float64
+ var
lastValue float64 + + // start at 1 so we start with the oldest entry + for i := 1; i <= epsilonBuckets; i += 1 { + pos := (h.epsilonIndex + i) % epsilonBuckets + bucketCount := h.epsilonCounts[pos] + // Changing the line below to what I think it should be to get the weights right + weight := float64(i) / float64(epsilonBuckets) + if bucketCount > 0 { + currentValue := float64(h.epsilonValues[pos]) / float64(bucketCount) + value += currentValue * weight + lastValue = currentValue + } else { + value += lastValue * weight + } + } + return value +} diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool.go new file mode 100644 index 000000000..d65cb2dee --- /dev/null +++ b/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool.go @@ -0,0 +1,201 @@ +// A Go package to intelligently and flexibly pool among multiple hosts from your Go application. +// Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are +// avoided. A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132 +package hostpool + +import ( + "log" + "sync" + "time" +) + +// Returns current version +func Version() string { + return "0.1" +} + +// --- Response interfaces and structs ---- + +// This interface represents the response from HostPool. You can retrieve the +// hostname by calling Host(), and after making a request to the host you should +// call Mark with any error encountered, which will inform the HostPool issuing +// the HostPoolResponse of what happened to the request and allow it to update. +type HostPoolResponse interface { + Host() string + Mark(error) + hostPool() HostPool +} + +type standardHostPoolResponse struct { + host string + sync.Once + pool HostPool +} + +// --- HostPool structs and interfaces ---- + +// This is the main HostPool interface. Structs implementing this interface +// allow you to Get a HostPoolResponse (which includes a hostname to use), +// get the list of all Hosts, and use ResetAll to reset state. +type HostPool interface { + Get() HostPoolResponse + // keep the marks separate so we can override independently + markSuccess(HostPoolResponse) + markFailed(HostPoolResponse) + + ResetAll() + Hosts() []string + + // Close the hostpool and release all resources. 
+ Close()
+}
+
+type standardHostPool struct {
+ sync.RWMutex
+ hosts map[string]*hostEntry
+ hostList []*hostEntry
+ initialRetryDelay time.Duration
+ maxRetryInterval time.Duration
+ nextHostIndex int
+}
+
+// ------ constants -------------------
+
+const epsilonBuckets = 120
+const epsilonDecay = 0.90 // decay the exploration rate
+const minEpsilon = 0.01 // explore one percent of the time
+const initialEpsilon = 0.3
+const defaultDecayDuration = time.Duration(5) * time.Minute
+
+// Construct a basic HostPool using the hostnames provided
+func New(hosts []string) HostPool {
+ p := &standardHostPool{
+ hosts: make(map[string]*hostEntry, len(hosts)),
+ hostList: make([]*hostEntry, len(hosts)),
+ initialRetryDelay: time.Duration(30) * time.Second,
+ maxRetryInterval: time.Duration(900) * time.Second,
+ }
+
+ for i, h := range hosts {
+ e := &hostEntry{
+ host: h,
+ retryDelay: p.initialRetryDelay,
+ }
+ p.hosts[h] = e
+ p.hostList[i] = e
+ }
+
+ return p
+}
+
+func (r *standardHostPoolResponse) Host() string {
+ return r.host
+}
+
+func (r *standardHostPoolResponse) hostPool() HostPool {
+ return r.pool
+}
+
+func (r *standardHostPoolResponse) Mark(err error) {
+ r.Do(func() {
+ doMark(err, r)
+ })
+}
+
+func doMark(err error, r HostPoolResponse) {
+ if err == nil {
+ r.hostPool().markSuccess(r)
+ } else {
+ r.hostPool().markFailed(r)
+ }
+}
+
+// return an entry from the HostPool
+func (p *standardHostPool) Get() HostPoolResponse {
+ p.Lock()
+ defer p.Unlock()
+ host := p.getRoundRobin()
+ return &standardHostPoolResponse{host: host, pool: p}
+}
+
+func (p *standardHostPool) getRoundRobin() string {
+ now := time.Now()
+ hostCount := len(p.hostList)
+ for i := range p.hostList {
+ // iterate in sequence from where we last iterated
+ currentIndex := (i + p.nextHostIndex) % hostCount
+
+ h := p.hostList[currentIndex]
+ if !h.dead {
+ p.nextHostIndex = currentIndex + 1
+ return h.host
+ }
+ if h.nextRetry.Before(now) {
+ h.willRetryHost(p.maxRetryInterval)
+ p.nextHostIndex = currentIndex + 1
+ return h.host
+ }
+ }
+
+ // all hosts are down.
re-add them + p.doResetAll() + p.nextHostIndex = 0 + return p.hostList[0].host +} + +func (p *standardHostPool) ResetAll() { + p.Lock() + defer p.Unlock() + p.doResetAll() +} + +// this actually performs the logic to reset, +// and should only be called when the lock has +// already been acquired +func (p *standardHostPool) doResetAll() { + for _, h := range p.hosts { + h.dead = false + } +} + +func (p *standardHostPool) Close() { + for _, h := range p.hosts { + h.dead = true + } +} + +func (p *standardHostPool) markSuccess(hostR HostPoolResponse) { + host := hostR.Host() + p.Lock() + defer p.Unlock() + + h, ok := p.hosts[host] + if !ok { + log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) + } + h.dead = false +} + +func (p *standardHostPool) markFailed(hostR HostPoolResponse) { + host := hostR.Host() + p.Lock() + defer p.Unlock() + h, ok := p.hosts[host] + if !ok { + log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) + } + if !h.dead { + h.dead = true + h.retryCount = 0 + h.retryDelay = p.initialRetryDelay + h.nextRetry = time.Now().Add(h.retryDelay) + } + +} +func (p *standardHostPool) Hosts() []string { + hosts := make([]string, 0, len(p.hosts)) + for host := range p.hosts { + hosts = append(hosts, host) + } + return hosts +} diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool_test.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool_test.go new file mode 100644 index 000000000..e974aa74c --- /dev/null +++ b/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool_test.go @@ -0,0 +1,145 @@ +package hostpool + +import ( + "errors" + "github.com/bmizerany/assert" + "io/ioutil" + "log" + "math/rand" + "os" + "testing" + "time" +) + +func TestHostPool(t *testing.T) { + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stdout) + + dummyErr := errors.New("Dummy Error") + + p := New([]string{"a", "b", "c"}) + assert.Equal(t, p.Get().Host(), "a") + assert.Equal(t, p.Get().Host(), "b") + assert.Equal(t, p.Get().Host(), "c") + respA := p.Get() + assert.Equal(t, respA.Host(), "a") + + respA.Mark(dummyErr) + respB := p.Get() + respB.Mark(dummyErr) + respC := p.Get() + assert.Equal(t, respC.Host(), "c") + respC.Mark(nil) + // get again, and verify that it's still c + assert.Equal(t, p.Get().Host(), "c") + // now try to mark b as success; should fail because already marked + respB.Mark(nil) + assert.Equal(t, p.Get().Host(), "c") // would be b if it were not dead + // now restore a + respA = &standardHostPoolResponse{host: "a", pool: p} + respA.Mark(nil) + assert.Equal(t, p.Get().Host(), "a") + assert.Equal(t, p.Get().Host(), "c") + + // ensure that we get *something* back when all hosts fail + for _, host := range []string{"a", "b", "c"} { + response := &standardHostPoolResponse{host: host, pool: p} + response.Mark(dummyErr) + } + resp := p.Get() + assert.NotEqual(t, resp, nil) +} + +type mockTimer struct { + t int // the time it will always return +} + +func (t *mockTimer) between(start time.Time, end time.Time) time.Duration { + return time.Duration(t.t) * time.Millisecond +} + +func TestEpsilonGreedy(t *testing.T) { + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stdout) + + rand.Seed(10) + + iterations := 12000 + p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool) + + timings := make(map[string]int64) + timings["a"] = 200 + timings["b"] = 300 + + hitCounts := make(map[string]int) + hitCounts["a"] = 0 + hitCounts["b"] = 0 + + log.Printf("starting first run (a, 
b)")
+
+ for i := 0; i < iterations; i += 1 {
+ if i != 0 && i%100 == 0 {
+ p.performEpsilonGreedyDecay()
+ }
+ hostR := p.Get()
+ host := hostR.Host()
+ hitCounts[host]++
+ timing := timings[host]
+ p.timer = &mockTimer{t: int(timing)}
+ hostR.Mark(nil)
+ }
+
+ for host := range hitCounts {
+ log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0)
+ }
+
+ assert.Equal(t, hitCounts["a"] > hitCounts["b"], true)
+
+ hitCounts["a"] = 0
+ hitCounts["b"] = 0
+ log.Printf("starting second run (b, a)")
+ timings["a"] = 500
+ timings["b"] = 100
+
+ for i := 0; i < iterations; i += 1 {
+ if i != 0 && i%100 == 0 {
+ p.performEpsilonGreedyDecay()
+ }
+ hostR := p.Get()
+ host := hostR.Host()
+ hitCounts[host]++
+ timing := timings[host]
+ p.timer = &mockTimer{t: int(timing)}
+ hostR.Mark(nil)
+ }
+
+ for host := range hitCounts {
+ log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0)
+ }
+
+ assert.Equal(t, hitCounts["b"] > hitCounts["a"], true)
+}
+
+func BenchmarkEpsilonGreedy(b *testing.B) {
+ b.StopTimer()
+
+ // Make up some response times
+ zipfDist := rand.NewZipf(rand.New(rand.NewSource(0)), 1.1, 5, 5000)
+ timings := make([]uint64, b.N)
+ for i := 0; i < b.N; i++ {
+ timings[i] = zipfDist.Uint64()
+ }
+
+ // Make the hostpool with a few hosts
+ p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if i != 0 && i%100 == 0 {
+ p.performEpsilonGreedyDecay()
+ }
+ hostR := p.Get()
+ p.timer = &mockTimer{t: int(timings[i])}
+ hostR.Mark(nil)
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/lib/pq/CONTRIBUTING.md b/services/templeton/vendor/src/github.com/lib/pq/CONTRIBUTING.md
new file mode 100644
index 000000000..84c937f15
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/lib/pq/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+## Contributing to pq
+
+`pq` has a backlog of pull requests, but contributions are still very
+much welcome. You can help with patch review, submitting bug reports,
+or adding new functionality. There is no formal style guide, but
+please conform to the style of existing code and general Go formatting
+conventions when submitting patches.
+
+### Patch review
+
+Help review existing open pull requests by commenting on the code or
+proposed functionality.
+
+### Bug reports
+
+We appreciate any bug reports, but especially ones with self-contained
+(doesn't depend on code outside of pq), minimal (can't be simplified
+further) test cases. It's especially helpful if you can submit a pull
+request with just the failing test case (you'll probably want to
+pattern it after the tests in
+[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
+
+### New functionality
+
+There are a number of pending patches for new functionality, so
+additional feature patches will take a while to merge. Still, patches
+are generally reviewed based on usefulness and complexity in addition
+to time-in-queue, so if you have a knockout idea, take a shot. Feel
+free to open an issue discussing your proposed patch beforehand.
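+
+A sketch of the kind of minimal, self-contained test case described above
+(the test name and query are hypothetical; `openTestConn` is the helper the
+existing test suite uses, as in bench_test.go below):
+
+```go
+func TestMinimalRepro(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	var got string
+	if err := db.QueryRow("SELECT 'expected'").Scan(&got); err != nil {
+		t.Fatal(err)
+	}
+	if got != "expected" {
+		t.Fatalf("got %q, want %q", got, "expected")
+	}
+}
+```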
diff --git a/services/templeton/vendor/src/github.com/lib/pq/LICENSE.md b/services/templeton/vendor/src/github.com/lib/pq/LICENSE.md new file mode 100644 index 000000000..5773904a3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/LICENSE.md @@ -0,0 +1,8 @@ +Copyright (c) 2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/services/templeton/vendor/src/github.com/lib/pq/README.md b/services/templeton/vendor/src/github.com/lib/pq/README.md new file mode 100644 index 000000000..b4e3f45cb --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/README.md @@ -0,0 +1,105 @@ +# pq - A pure Go postgres driver for Go's database/sql package + +[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq) + +## Install + + go get github.com/lib/pq + +## Docs + +For detailed documentation and basic usage examples, please see the package +documentation at . + +## Tests + +`go test` is used for testing. A running PostgreSQL server is +required, with the ability to log in. The default database to connect +to test with is "pqgotest," but it can be overridden using environment +variables. + +Example: + + PGHOST=/var/run/postgresql go test github.com/lib/pq + +Optionally, a benchmark suite can be run as part of the tests: + + PGHOST=/var/run/postgresql go test -bench . + +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. `bytea`) +* Package for `hstore` support +* COPY FROM support +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support +* Notifications: `LISTEN`/`NOTIFY` +* pgpass support + +## Future / Things you can help with + +* Better COPY FROM / COPY TO (see discussion in #181) + +## Thank you (alphabetical) + +Some of these contributors are from the original library `bmizerany/pq.go` whose +code still exists in here. 
+ +* Andy Balholm (andybalholm) +* Ben Berkert (benburkert) +* Benjamin Heatwole (bheatwole) +* Bill Mill (llimllib) +* Bjørn Madsen (aeons) +* Blake Gentry (bgentry) +* Brad Fitzpatrick (bradfitz) +* Charlie Melbye (cmelbye) +* Chris Bandy (cbandy) +* Chris Gilling (cgilling) +* Chris Walsh (cwds) +* Dan Sosedoff (sosedoff) +* Daniel Farina (fdr) +* Eric Chlebek (echlebek) +* Eric Garrido (minusnine) +* Eric Urban (hydrogen18) +* Everyone at The Go Team +* Evan Shaw (edsrzf) +* Ewan Chou (coocood) +* Fazal Majid (fazalmajid) +* Federico Romero (federomero) +* Fumin (fumin) +* Gary Burd (garyburd) +* Heroku (heroku) +* James Pozdena (jpoz) +* Jason McVetta (jmcvetta) +* Jeremy Jay (pbnjay) +* Joakim Sernbrant (serbaut) +* John Gallagher (jgallagher) +* Jonathan Rudenberg (titanous) +* Joël Stemmer (jstemmer) +* Kamil Kisiel (kisielk) +* Kelly Dunn (kellydunn) +* Keith Rarick (kr) +* Kir Shatrov (kirs) +* Lann Martin (lann) +* Maciek Sakrejda (deafbybeheading) +* Marc Brinkmann (mbr) +* Marko Tiikkaja (johto) +* Matt Newberry (MattNewberry) +* Matt Robenolt (mattrobenolt) +* Martin Olsen (martinolsen) +* Mike Lewis (mikelikespie) +* Nicolas Patry (Narsil) +* Oliver Tonnhofer (olt) +* Patrick Hayes (phayes) +* Paul Hammond (paulhammond) +* Ryan Smith (ryandotsmith) +* Samuel Stauffer (samuel) +* Timothée Peignier (cyberdelia) +* Travis Cline (tmc) +* TruongSinh Tran-Nguyen (truongsinh) +* Yaismel Miranda (ympons) +* notedit (notedit) diff --git a/services/templeton/vendor/src/github.com/lib/pq/bench_test.go b/services/templeton/vendor/src/github.com/lib/pq/bench_test.go new file mode 100644 index 000000000..e71f41d06 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/bench_test.go @@ -0,0 +1,435 @@ +// +build go1.1 + +package pq + +import ( + "bufio" + "bytes" + "database/sql" + "database/sql/driver" + "io" + "math/rand" + "net" + "runtime" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/lib/pq/oid" +) + +var ( + selectStringQuery = "SELECT '" + strings.Repeat("0123456789", 10) + "'" + selectSeriesQuery = "SELECT generate_series(1, 100)" +) + +func BenchmarkSelectString(b *testing.B) { + var result string + benchQuery(b, selectStringQuery, &result) +} + +func BenchmarkSelectSeries(b *testing.B) { + var result int + benchQuery(b, selectSeriesQuery, &result) +} + +func benchQuery(b *testing.B, query string, result interface{}) { + b.StopTimer() + db := openTestConn(b) + defer db.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchQueryLoop(b, db, query, result) + } +} + +func benchQueryLoop(b *testing.B, db *sql.DB, query string, result interface{}) { + rows, err := db.Query(query) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + for rows.Next() { + err = rows.Scan(result) + if err != nil { + b.Fatal("failed to scan", err) + } + } +} + +// reading from circularConn yields content[:prefixLen] once, followed by +// content[prefixLen:] over and over again. It never returns EOF. 
+type circularConn struct { + content string + prefixLen int + pos int + net.Conn // for all other net.Conn methods that will never be called +} + +func (r *circularConn) Read(b []byte) (n int, err error) { + n = copy(b, r.content[r.pos:]) + r.pos += n + if r.pos >= len(r.content) { + r.pos = r.prefixLen + } + return +} + +func (r *circularConn) Write(b []byte) (n int, err error) { return len(b), nil } + +func (r *circularConn) Close() error { return nil } + +func fakeConn(content string, prefixLen int) *conn { + c := &circularConn{content: content, prefixLen: prefixLen} + return &conn{buf: bufio.NewReader(c), c: c} +} + +// This benchmark is meant to be the same as BenchmarkSelectString, but takes +// out some of the factors this package can't control. The numbers are less noisy, +// but also the costs of network communication aren't accurately represented. +func BenchmarkMockSelectString(b *testing.B) { + b.StopTimer() + // taken from a recorded run of BenchmarkSelectString + // See: http://www.postgresql.org/docs/current/static/protocol-message-formats.html + const response = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + + "2\x00\x00\x00\x04" + + "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" + + "C\x00\x00\x00\rSELECT 1\x00" + + "Z\x00\x00\x00\x05I" + + "3\x00\x00\x00\x04" + + "Z\x00\x00\x00\x05I" + c := fakeConn(response, 0) + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchMockQuery(b, c, selectStringQuery) + } +} + +var seriesRowData = func() string { + var buf bytes.Buffer + for i := 1; i <= 100; i++ { + digits := byte(2) + if i >= 100 { + digits = 3 + } else if i < 10 { + digits = 1 + } + buf.WriteString("D\x00\x00\x00") + buf.WriteByte(10 + digits) + buf.WriteString("\x00\x01\x00\x00\x00") + buf.WriteByte(digits) + buf.WriteString(strconv.Itoa(i)) + } + return buf.String() +}() + +func BenchmarkMockSelectSeries(b *testing.B) { + b.StopTimer() + var response = "1\x00\x00\x00\x04" + + "t\x00\x00\x00\x06\x00\x00" + + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + + "Z\x00\x00\x00\x05I" + + "2\x00\x00\x00\x04" + + seriesRowData + + "C\x00\x00\x00\x0fSELECT 100\x00" + + "Z\x00\x00\x00\x05I" + + "3\x00\x00\x00\x04" + + "Z\x00\x00\x00\x05I" + c := fakeConn(response, 0) + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchMockQuery(b, c, selectSeriesQuery) + } +} + +func benchMockQuery(b *testing.B, c *conn, query string) { + stmt, err := c.Prepare(query) + if err != nil { + b.Fatal(err) + } + defer stmt.Close() + rows, err := stmt.Query(nil) + if err != nil { + b.Fatal(err) + } + defer rows.Close() + var dest [1]driver.Value + for { + if err := rows.Next(dest[:]); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } +} + +func BenchmarkPreparedSelectString(b *testing.B) { + var result string + benchPreparedQuery(b, selectStringQuery, &result) +} + +func BenchmarkPreparedSelectSeries(b *testing.B) { + var result int + benchPreparedQuery(b, selectSeriesQuery, &result) +} + +func benchPreparedQuery(b *testing.B, query string, result interface{}) { + b.StopTimer() + db := openTestConn(b) + defer db.Close() + stmt, err := db.Prepare(query) + if err != nil { + b.Fatal(err) + } + defer stmt.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + benchPreparedQueryLoop(b, 
db, stmt, result)
+ }
+}
+
+func benchPreparedQueryLoop(b *testing.B, db *sql.DB, stmt *sql.Stmt, result interface{}) {
+ rows, err := stmt.Query()
+ if err != nil {
+ b.Fatal(err)
+ }
+ if !rows.Next() {
+ rows.Close()
+ b.Fatal("no rows")
+ }
+ defer rows.Close()
+ for rows.Next() {
+ err = rows.Scan(&result)
+ if err != nil {
+ b.Fatal("failed to scan")
+ }
+ }
+}
+
+// See the comment for BenchmarkMockSelectString.
+func BenchmarkMockPreparedSelectString(b *testing.B) {
+ b.StopTimer()
+ const parseResponse = "1\x00\x00\x00\x04" +
+ "t\x00\x00\x00\x06\x00\x00" +
+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+ "Z\x00\x00\x00\x05I"
+ const responses = parseResponse +
+ "2\x00\x00\x00\x04" +
+ "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
+ "C\x00\x00\x00\rSELECT 1\x00" +
+ "Z\x00\x00\x00\x05I"
+ c := fakeConn(responses, len(parseResponse))
+
+ stmt, err := c.Prepare(selectStringQuery)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ benchPreparedMockQuery(b, c, stmt)
+ }
+}
+
+func BenchmarkMockPreparedSelectSeries(b *testing.B) {
+ b.StopTimer()
+ const parseResponse = "1\x00\x00\x00\x04" +
+ "t\x00\x00\x00\x06\x00\x00" +
+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+ "Z\x00\x00\x00\x05I"
+ var responses = parseResponse +
+ "2\x00\x00\x00\x04" +
+ seriesRowData +
+ "C\x00\x00\x00\x0fSELECT 100\x00" +
+ "Z\x00\x00\x00\x05I"
+ c := fakeConn(responses, len(parseResponse))
+
+ stmt, err := c.Prepare(selectSeriesQuery)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ benchPreparedMockQuery(b, c, stmt)
+ }
+}
+
+func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) {
+ rows, err := stmt.Query(nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer rows.Close()
+ var dest [1]driver.Value
+ for {
+ if err := rows.Next(dest[:]); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkEncodeInt64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{}, int64(1234), oid.T_int8)
+ }
+}
+
+func BenchmarkEncodeFloat64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{}, 3.14159, oid.T_float8)
+ }
+}
+
+var testByteString = []byte("abcdefghijklmnopqrstuvwxyz")
+
+func BenchmarkEncodeByteaHex(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{serverVersion: 90000}, testByteString, oid.T_bytea)
+ }
+}
+func BenchmarkEncodeByteaEscape(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{serverVersion: 84000}, testByteString, oid.T_bytea)
+ }
+}
+
+func BenchmarkEncodeBool(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{}, true, oid.T_bool)
+ }
+}
+
+var testTimestamptz = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.Local)
+
+func BenchmarkEncodeTimestamptz(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{}, testTimestamptz, oid.T_timestamptz)
+ }
+}
+
+var testIntBytes = []byte("1234")
+
+func BenchmarkDecodeInt64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ decode(&parameterStatus{}, testIntBytes, oid.T_int8, formatText)
+ }
+}
+
+var testFloatBytes = []byte("3.14159")
+
+func BenchmarkDecodeFloat64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ decode(&parameterStatus{}, testFloatBytes, oid.T_float8, formatText)
+ }
+}
+
+var testBoolBytes = []byte{'t'}
+
+func BenchmarkDecodeBool(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ decode(&parameterStatus{}, testBoolBytes, oid.T_bool, formatText)
+ }
+}
+
+func TestDecodeBool(t *testing.T) {
+ db := openTestConn(t)
+ rows, err := db.Query("select true")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows.Close()
+}
+
+var testTimestamptzBytes = []byte("2013-09-17 22:15:32.360754-07")
+
+func BenchmarkDecodeTimestamptz(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+ }
+}
+
+func BenchmarkDecodeTimestamptzMultiThread(b *testing.B) {
+ oldProcs := runtime.GOMAXPROCS(0)
+ defer runtime.GOMAXPROCS(oldProcs)
+ runtime.GOMAXPROCS(runtime.NumCPU())
+ globalLocationCache = newLocationCache()
+
+ f := func(wg *sync.WaitGroup, loops int) {
+ defer wg.Done()
+ for i := 0; i < loops; i++ {
+ decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+ }
+ }
+
+ wg := &sync.WaitGroup{}
+ b.ResetTimer()
+ for j := 0; j < 10; j++ {
+ wg.Add(1)
+ go f(wg, b.N/10)
+ }
+ wg.Wait()
+}
+
+func BenchmarkLocationCache(b *testing.B) {
+ globalLocationCache = newLocationCache()
+ for i := 0; i < b.N; i++ {
+ globalLocationCache.getLocation(rand.Intn(10000))
+ }
+}
+
+func BenchmarkLocationCacheMultiThread(b *testing.B) {
+ oldProcs := runtime.GOMAXPROCS(0)
+ defer runtime.GOMAXPROCS(oldProcs)
+ runtime.GOMAXPROCS(runtime.NumCPU())
+ globalLocationCache = newLocationCache()
+
+ f := func(wg *sync.WaitGroup, loops int) {
+ defer wg.Done()
+ for i := 0; i < loops; i++ {
+ globalLocationCache.getLocation(rand.Intn(10000))
+ }
+ }
+
+ wg := &sync.WaitGroup{}
+ b.ResetTimer()
+ for j := 0; j < 10; j++ {
+ wg.Add(1)
+ go f(wg, b.N/10)
+ }
+ wg.Wait()
+}
+
+// Stress test the performance of parsing results from the wire.
+func BenchmarkResultParsing(b *testing.B) {
+ b.StopTimer()
+
+ db := openTestConn(b)
+ defer db.Close()
+ _, err := db.Exec("BEGIN")
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ res, err := db.Query("SELECT generate_series(1, 50000)")
+ if err != nil {
+ b.Fatal(err)
+ }
+ res.Close()
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/lib/pq/buf.go b/services/templeton/vendor/src/github.com/lib/pq/buf.go
new file mode 100644
index 000000000..666b0012a
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/lib/pq/buf.go
@@ -0,0 +1,91 @@
+package pq
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "github.com/lib/pq/oid"
+)
+
+type readBuf []byte
+
+func (b *readBuf) int32() (n int) {
+ n = int(int32(binary.BigEndian.Uint32(*b)))
+ *b = (*b)[4:]
+ return
+}
+
+func (b *readBuf) oid() (n oid.Oid) {
+ n = oid.Oid(binary.BigEndian.Uint32(*b))
+ *b = (*b)[4:]
+ return
+}
+
+// N.B: this is actually an unsigned 16-bit integer, unlike int32
+func (b *readBuf) int16() (n int) {
+ n = int(binary.BigEndian.Uint16(*b))
+ *b = (*b)[2:]
+ return
+}
+
+func (b *readBuf) string() string {
+ i := bytes.IndexByte(*b, 0)
+ if i < 0 {
+ errorf("invalid message format; expected string terminator")
+ }
+ s := (*b)[:i]
+ *b = (*b)[i+1:]
+ return string(s)
+}
+
+func (b *readBuf) next(n int) (v []byte) {
+ v = (*b)[:n]
+ *b = (*b)[n:]
+ return
+}
+
+func (b *readBuf) byte() byte {
+ return b.next(1)[0]
+}
+
+type writeBuf struct {
+ buf []byte
+ pos int
+}
+
+func (b *writeBuf) int32(n int) {
+ x := make([]byte, 4)
+ binary.BigEndian.PutUint32(x, uint32(n))
+ b.buf = append(b.buf, x...)
+} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) string(s string) { + b.buf = append(b.buf, (s + "\000")...) +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/certs/README b/services/templeton/vendor/src/github.com/lib/pq/certs/README new file mode 100644 index 000000000..24ab7b256 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/certs/README @@ -0,0 +1,3 @@ +This directory contains certificates and private keys for testing some +SSL-related functionality in Travis. Do NOT use these certificates for +anything other than testing. diff --git a/services/templeton/vendor/src/github.com/lib/pq/certs/postgresql.crt b/services/templeton/vendor/src/github.com/lib/pq/certs/postgresql.crt new file mode 100644 index 000000000..6e6b4284a --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/certs/postgresql.crt @@ -0,0 +1,69 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA + Validity + Not Before: Oct 11 15:10:11 2014 GMT + Not After : Oct 8 15:10:11 2024 GMT + Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pqgosslcert + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (1024 bit) + Modulus (1024 bit): + 00:e3:8c:06:9a:70:54:51:d1:34:34:83:39:cd:a2: + 59:0f:05:ed:8d:d8:0e:34:d0:92:f4:09:4d:ee:8c: + 78:55:49:24:f8:3c:e0:34:58:02:b2:e7:94:58:c1: + e8:e5:bb:d1:af:f6:54:c1:40:b1:90:70:79:0d:35: + 54:9c:8f:16:e9:c2:f0:92:e6:64:49:38:c1:76:f8: + 47:66:c4:5b:4a:b6:a9:43:ce:c8:be:6c:4d:2b:94: + 97:3c:55:bc:d1:d0:6e:b7:53:ae:89:5c:4b:6b:86: + 40:be:c1:ae:1e:64:ce:9c:ae:87:0a:69:e5:c8:21: + 12:be:ae:1d:f6:45:df:16:a7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 9B:25:31:63:A2:D8:06:FF:CB:E3:E9:96:FF:0D:BA:DC:12:7D:04:CF + X509v3 Authority Key Identifier: + keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + Signature Algorithm: sha256WithRSAEncryption + 3e:f5:f8:0b:4e:11:bd:00:86:1f:ce:dc:97:02:98:91:11:f5: + 65:f6:f2:8a:b2:3e:47:92:05:69:28:c9:e9:b4:f7:cf:93:d1: + 2d:81:5d:00:3c:23:be:da:70:ea:59:e1:2c:d3:25:49:ae:a6: + 95:54:c1:10:df:23:e3:fe:d6:e4:76:c7:6b:73:ad:1b:34:7c: + e2:56:cc:c0:37:ae:c5:7a:11:20:6c:3d:05:0e:99:cd:22:6c: + cf:59:a1:da:28:d4:65:ba:7d:2f:2b:3d:69:6d:a6:c1:ae:57: + bf:56:64:13:79:f8:48:46:65:eb:81:67:28:0b:7b:de:47:10: + b3:80:3c:31:d1:58:94:01:51:4a:c7:c8:1a:01:a8:af:c4:cd: + bb:84:a5:d9:8b:b4:b9:a1:64:3e:95:d9:90:1d:d5:3f:67:cc: + 3b:ba:f5:b4:d1:33:77:ee:c2:d2:3e:7e:c5:66:6e:b7:35:4c: + 60:57:b0:b8:be:36:c8:f3:d3:95:8c:28:4a:c9:f7:27:a4:0d: + e5:96:99:eb:f5:c8:bd:f3:84:6d:ef:02:f9:8a:36:7d:6b:5f: + 36:68:37:41:d9:74:ae:c6:78:2e:44:86:a1:ad:43:ca:fb:b5: + 3e:ba:10:23:09:02:ac:62:d1:d0:83:c8:95:b9:e3:5e:30:ff: + 5b:2b:38:fa +-----BEGIN CERTIFICATE----- 
+MIIDEzCCAfugAwIBAgIBAjANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP +MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp +dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTEwMTFa +Fw0yNDEwMDgxNTEwMTFaMGQxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx +EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx +FDASBgNVBAMTC3BxZ29zc2xjZXJ0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0WAKy55RYwejl +u9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+bE0rlJc8VbzR +0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQABo1owWDAdBgNV +HQ4EFgQUmyUxY6LYBv/L4+mW/w263BJ9BM8wHwYDVR0jBBgwFoAUUpPtHnYKn2VP +3hlmwdUiQDXLoHIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQEL +BQADggEBAD71+AtOEb0Ahh/O3JcCmJER9WX28oqyPkeSBWkoyem098+T0S2BXQA8 +I77acOpZ4SzTJUmuppVUwRDfI+P+1uR2x2tzrRs0fOJWzMA3rsV6ESBsPQUOmc0i +bM9Zodoo1GW6fS8rPWltpsGuV79WZBN5+EhGZeuBZygLe95HELOAPDHRWJQBUUrH +yBoBqK/EzbuEpdmLtLmhZD6V2ZAd1T9nzDu69bTRM3fuwtI+fsVmbrc1TGBXsLi+ +Nsjz05WMKErJ9yekDeWWmev1yL3zhG3vAvmKNn1rXzZoN0HZdK7GeC5EhqGtQ8r7 +tT66ECMJAqxi0dCDyJW5414w/1srOPo= +-----END CERTIFICATE----- diff --git a/services/templeton/vendor/src/github.com/lib/pq/certs/postgresql.key b/services/templeton/vendor/src/github.com/lib/pq/certs/postgresql.key new file mode 100644 index 000000000..eb8b20be9 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/certs/postgresql.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDjjAaacFRR0TQ0gznNolkPBe2N2A400JL0CU3ujHhVSST4POA0 +WAKy55RYwejlu9Gv9lTBQLGQcHkNNVScjxbpwvCS5mRJOMF2+EdmxFtKtqlDzsi+ +bE0rlJc8VbzR0G63U66JXEtrhkC+wa4eZM6crocKaeXIIRK+rh32Rd8WpwIDAQAB +AoGAM5dM6/kp9P700i8qjOgRPym96Zoh5nGfz/rIE5z/r36NBkdvIg8OVZfR96nH +b0b9TOMR5lsPp0sI9yivTWvX6qyvLJRWy2vvx17hXK9NxXUNTAm0PYZUTvCtcPeX +RnJpzQKNZQPkFzF0uXBc4CtPK2Vz0+FGvAelrhYAxnw1dIkCQQD+9qaW5QhXjsjb +Nl85CmXgxPmGROcgLQCO+omfrjf9UXrituU9Dz6auym5lDGEdMFnkzfr+wpasEy9 +mf5ZZOhDAkEA5HjXfVGaCtpydOt6hDon/uZsyssCK2lQ7NSuE3vP+sUsYMzIpEoy +t3VWXqKbo+g9KNDTP4WEliqp1aiSIylzzQJANPeqzihQnlgEdD4MdD4rwhFJwVIp +Le8Lcais1KaN7StzOwxB/XhgSibd2TbnPpw+3bSg5n5lvUdo+e62/31OHwJAU1jS +I+F09KikQIr28u3UUWT2IzTT4cpVv1AHAQyV3sG3YsjSGT0IK20eyP9BEBZU2WL0 +7aNjrvR5aHxKc5FXsQJABsFtyGpgI5X4xufkJZVZ+Mklz2n7iXa+XPatMAHFxAtb +EEMt60rngwMjXAzBSC6OYuYogRRAY3UCacNC5VhLYQ== +-----END RSA PRIVATE KEY----- diff --git a/services/templeton/vendor/src/github.com/lib/pq/certs/root.crt b/services/templeton/vendor/src/github.com/lib/pq/certs/root.crt new file mode 100644 index 000000000..aecf8f621 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/certs/root.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIJANmheROCdW1NMA0GCSqGSIb3DQEBBQUAMF4xCzAJBgNV +BAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGExEjAQBgNVBAcTCUxhcyBWZWdhczEaMBgG +A1UEChMRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMTBXBxIENBMB4XDTE0MTAx +MTE1MDQyOVoXDTI0MTAwODE1MDQyOVowXjELMAkGA1UEBhMCVVMxDzANBgNVBAgT +Bk5ldmFkYTESMBAGA1UEBxMJTGFzIFZlZ2FzMRowGAYDVQQKExFnaXRodWIuY29t +L2xpYi9wcTEOMAwGA1UEAxMFcHEgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCV4PxP7ShzWBzUCThcKk3qZtOLtHmszQVtbqhvgTpm1kTRtKBdVMu0 +pLAHQ3JgJCnAYgH0iZxVGoMP16T3irdgsdC48+nNTFM2T0cCdkfDURGIhSFN47cb +Pgy306BcDUD2q7ucW33+dlFSRuGVewocoh4BWM/vMtMvvWzdi4Ag/L/jhb+5wZxZ +sWymsadOVSDePEMKOvlCa3EdVwVFV40TVyDb+iWBUivDAYsS2a3KajuJrO6MbZiE +Sp2RCIkZS2zFmzWxVRi9ZhzIZhh7EVF9JAaNC3T52jhGUdlRq3YpBTMnd89iOh74 +6jWXG7wSuPj3haFzyNhmJ0ZUh+2Ynoh1AgMBAAGjgcMwgcAwHQYDVR0OBBYEFFKT +7R52Cp9lT94ZZsHVIkA1y6ByMIGQBgNVHSMEgYgwgYWAFFKT7R52Cp9lT94ZZsHV 
+IkA1y6ByoWKkYDBeMQswCQYDVQQGEwJVUzEPMA0GA1UECBMGTmV2YWRhMRIwEAYD +VQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdpdGh1Yi5jb20vbGliL3BxMQ4wDAYD +VQQDEwVwcSBDQYIJANmheROCdW1NMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF +BQADggEBAAEhCLWkqJNMI8b4gkbmj5fqQ/4+oO83bZ3w2Oqf6eZ8I8BC4f2NOyE6 +tRUlq5+aU7eqC1cOAvGjO+YHN/bF/DFpwLlzvUSXt+JP/pYcUjL7v+pIvwqec9hD +ndvM4iIbkD/H/OYQ3L+N3W+G1x7AcFIX+bGCb3PzYVQAjxreV6//wgKBosMGFbZo +HPxT9RPMun61SViF04H5TNs0derVn1+5eiiYENeAhJzQNyZoOOUuX1X/Inx9bEPh +C5vFBtSMgIytPgieRJVWAiMLYsfpIAStrHztRAbBs2DU01LmMgRvHdxgFEKinC/d +UHZZQDP+6pT+zADrGhQGXe4eThaO6f0= +-----END CERTIFICATE----- diff --git a/services/templeton/vendor/src/github.com/lib/pq/certs/server.crt b/services/templeton/vendor/src/github.com/lib/pq/certs/server.crt new file mode 100644 index 000000000..ddc995a6d --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/certs/server.crt @@ -0,0 +1,81 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=pq CA + Validity + Not Before: Oct 11 15:05:15 2014 GMT + Not After : Oct 8 15:05:15 2024 GMT + Subject: C=US, ST=Nevada, L=Las Vegas, O=github.com/lib/pq, CN=postgres + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public Key: (2048 bit) + Modulus (2048 bit): + 00:d7:8a:4c:85:fb:17:a5:3c:8f:e0:72:11:29:ce: + 3f:b0:1f:3f:7d:c6:ee:7f:a7:fc:02:2b:35:47:08: + a6:3d:90:df:5c:56:14:94:00:c7:6d:d1:d2:e2:61: + 95:77:b8:e3:a6:66:31:f9:1f:21:7d:62:e1:27:da: + 94:37:61:4a:ea:63:53:a0:61:b8:9c:bb:a5:e2:e7: + b7:a6:d8:0f:05:04:c7:29:e2:ea:49:2b:7f:de:15: + 00:a6:18:70:50:c7:0c:de:9a:f9:5a:96:b0:e1:94: + 06:c6:6d:4a:21:3b:b4:0f:a5:6d:92:86:34:b2:4e: + d7:0e:a7:19:c0:77:0b:7b:87:c8:92:de:42:ff:86: + d2:b7:9a:a4:d4:15:23:ca:ad:a5:69:21:b8:ce:7e: + 66:cb:85:5d:b9:ed:8b:2d:09:8d:94:e4:04:1e:72: + ec:ef:d0:76:90:15:5a:a4:f7:91:4b:e9:ce:4e:9d: + 5d:9a:70:17:9c:d8:e9:73:83:ea:3d:61:99:a6:cd: + ac:91:40:5a:88:77:e5:4e:2a:8e:3d:13:f3:f9:38: + 6f:81:6b:8a:95:ca:0e:07:ab:6f:da:b4:8c:d9:ff: + aa:78:03:aa:c7:c2:cf:6f:64:92:d3:d8:83:d5:af: + f1:23:18:a7:2e:7b:17:0b:e7:7d:f1:fa:a8:41:a3: + 04:57 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + EE:F0:B3:46:DC:C7:09:EB:0E:B6:2F:E5:FE:62:60:45:44:9F:59:CC + X509v3 Authority Key Identifier: + keyid:52:93:ED:1E:76:0A:9F:65:4F:DE:19:66:C1:D5:22:40:35:CB:A0:72 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + Signature Algorithm: sha256WithRSAEncryption + 7e:5a:6e:be:bf:d2:6c:c1:d6:fa:b6:fb:3f:06:53:36:08:87: + 9d:95:b1:39:af:9e:f6:47:38:17:39:da:25:7c:f2:ad:0c:e3: + ab:74:19:ca:fb:8c:a0:50:c0:1d:19:8a:9c:21:ed:0f:3a:d1: + 96:54:2e:10:09:4f:b8:70:f7:2b:99:43:d2:c6:15:bc:3f:24: + 7d:28:39:32:3f:8d:a4:4f:40:75:7f:3e:0d:1c:d1:69:f2:4e: + 98:83:47:97:d2:25:ac:c9:36:86:2f:04:a6:c4:86:c7:c4:00: + 5f:7f:b9:ad:fc:bf:e9:f5:78:d7:82:1a:51:0d:fc:ab:9e:92: + 1d:5f:0c:18:d1:82:e0:14:c9:ce:91:89:71:ff:49:49:ff:35: + bf:7b:44:78:42:c1:d0:66:65:bb:28:2e:60:ca:9b:20:12:a9: + 90:61:b1:96:ec:15:46:c9:37:f7:07:90:8a:89:45:2a:3f:37: + ec:dc:e3:e5:8f:c3:3a:57:80:a5:54:60:0c:e1:b2:26:99:2b: + 40:7e:36:d1:9a:70:02:ec:63:f4:3b:72:ae:81:fb:30:20:6d: + cb:48:46:c6:b5:8f:39:b1:84:05:25:55:8d:f5:62:f6:1b:46: + 2e:da:a3:4c:26:12:44:d7:56:b6:b8:a9:ca:d3:ab:71:45:7c: + 9f:48:6d:1e +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIBATANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJVUzEP +MA0GA1UECBMGTmV2YWRhMRIwEAYDVQQHEwlMYXMgVmVnYXMxGjAYBgNVBAoTEWdp 
+dGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDEwVwcSBDQTAeFw0xNDEwMTExNTA1MTVa +Fw0yNDEwMDgxNTA1MTVaMGExCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZOZXZhZGEx +EjAQBgNVBAcTCUxhcyBWZWdhczEaMBgGA1UEChMRZ2l0aHViLmNvbS9saWIvcHEx +ETAPBgNVBAMTCHBvc3RncmVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYUlADHbdHS4mGV +d7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLqSSt/3hUAphhw +UMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C/4bSt5qk1BUj +yq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1dmnAXnNjpc4Pq +PWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOqx8LPb2SS09iD +1a/xIxinLnsXC+d98fqoQaMEVwIDAQABo1owWDAdBgNVHQ4EFgQU7vCzRtzHCesO +ti/l/mJgRUSfWcwwHwYDVR0jBBgwFoAUUpPtHnYKn2VP3hlmwdUiQDXLoHIwCQYD +VR0TBAIwADALBgNVHQ8EBAMCBeAwDQYJKoZIhvcNAQELBQADggEBAH5abr6/0mzB +1vq2+z8GUzYIh52VsTmvnvZHOBc52iV88q0M46t0Gcr7jKBQwB0Zipwh7Q860ZZU +LhAJT7hw9yuZQ9LGFbw/JH0oOTI/jaRPQHV/Pg0c0WnyTpiDR5fSJazJNoYvBKbE +hsfEAF9/ua38v+n1eNeCGlEN/Kuekh1fDBjRguAUyc6RiXH/SUn/Nb97RHhCwdBm +ZbsoLmDKmyASqZBhsZbsFUbJN/cHkIqJRSo/N+zc4+WPwzpXgKVUYAzhsiaZK0B+ +NtGacALsY/Q7cq6B+zAgbctIRsa1jzmxhAUlVY31YvYbRi7ao0wmEkTXVra4qcrT +q3FFfJ9IbR4= +-----END CERTIFICATE----- diff --git a/services/templeton/vendor/src/github.com/lib/pq/certs/server.key b/services/templeton/vendor/src/github.com/lib/pq/certs/server.key new file mode 100644 index 000000000..bd7b019b6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/certs/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA14pMhfsXpTyP4HIRKc4/sB8/fcbuf6f8Ais1RwimPZDfXFYU +lADHbdHS4mGVd7jjpmYx+R8hfWLhJ9qUN2FK6mNToGG4nLul4ue3ptgPBQTHKeLq +SSt/3hUAphhwUMcM3pr5Wpaw4ZQGxm1KITu0D6VtkoY0sk7XDqcZwHcLe4fIkt5C +/4bSt5qk1BUjyq2laSG4zn5my4Vdue2LLQmNlOQEHnLs79B2kBVapPeRS+nOTp1d +mnAXnNjpc4PqPWGZps2skUBaiHflTiqOPRPz+ThvgWuKlcoOB6tv2rSM2f+qeAOq +x8LPb2SS09iD1a/xIxinLnsXC+d98fqoQaMEVwIDAQABAoIBAF3ZoihUhJ82F4+r +Gz4QyDpv4L1reT2sb1aiabhcU8ZK5nbWJG+tRyjSS/i2dNaEcttpdCj9HR/zhgZM +bm0OuAgG58rVwgS80CZUruq++Qs+YVojq8/gWPTiQD4SNhV2Fmx3HkwLgUk3oxuT +SsvdqzGE3okGVrutCIcgy126eA147VPMoej1Bb3fO6npqK0pFPhZfAc0YoqJuM+k +obRm5pAnGUipyLCFXjA9HYPKwYZw2RtfdA3CiImHeanSdqS+ctrC9y8BV40Th7gZ +haXdKUNdjmIxV695QQ1mkGqpKLZFqhzKioGQ2/Ly2d1iaKN9fZltTusu8unepWJ2 +tlT9qMECgYEA9uHaF1t2CqE+AJvWTihHhPIIuLxoOQXYea1qvxfcH/UMtaLKzCNm +lQ5pqCGsPvp+10f36yttO1ZehIvlVNXuJsjt0zJmPtIolNuJY76yeussfQ9jHheB +5uPEzCFlHzxYbBUyqgWaF6W74okRGzEGJXjYSP0yHPPdU4ep2q3bGiUCgYEA34Af +wBSuQSK7uLxArWHvQhyuvi43ZGXls6oRGl+Ysj54s8BP6XGkq9hEJ6G4yxgyV+BR +DUOs5X8/TLT8POuIMYvKTQthQyCk0eLv2FLdESDuuKx0kBVY3s8lK3/z5HhrdOiN +VMNZU+xDKgKc3hN9ypkk8vcZe6EtH7Y14e0rVcsCgYBTgxi8F/M5K0wG9rAqphNz +VFBA9XKn/2M33cKjO5X5tXIEKzpAjaUQvNxexG04rJGljzG8+mar0M6ONahw5yD1 +O7i/XWgazgpuOEkkVYiYbd8RutfDgR4vFVMn3hAP3eDnRtBplRWH9Ec3HTiNIys6 +F8PKBOQjyRZQQC7jyzW3hQKBgACe5HeuFwXLSOYsb6mLmhR+6+VPT4wR1F95W27N +USk9jyxAnngxfpmTkiziABdgS9N+pfr5cyN4BP77ia/Jn6kzkC5Cl9SN5KdIkA3z +vPVtN/x/ThuQU5zaymmig1ThGLtMYggYOslG4LDfLPxY5YKIhle+Y+259twdr2yf +Mf2dAoGAaGv3tWMgnIdGRk6EQL/yb9PKHo7ShN+tKNlGaK7WwzBdKs+Fe8jkgcr7 +pz4Ne887CmxejdISzOCcdT+Zm9Bx6I/uZwWOtDvWpIgIxVX9a9URj/+D1MxTE/y4 +d6H+c89yDY62I2+drMpdjCd3EtCaTlxpTbRS+s1eAHMH7aEkcCE= +-----END RSA PRIVATE KEY----- diff --git a/services/templeton/vendor/src/github.com/lib/pq/conn.go b/services/templeton/vendor/src/github.com/lib/pq/conn.go new file mode 100644 index 000000000..336c89449 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/conn.go @@ -0,0 +1,1847 @@ +package pq + +import ( + "bufio" + "crypto/md5" + "crypto/tls" + "crypto/x509" + "database/sql" + "database/sql/driver" + 
"encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/lib/pq/oid" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less.") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly.") +) + +type drv struct{} + +func (d *drv) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &drv{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +type defaultDialer struct{} + +func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) { + return net.Dial(ntw, addr) +} +func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout(ntw, addr, timeout) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If true, this connection is bad and all public-facing functions should + // return ErrBadConn. + bad bool + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool +} + +// Handle driver-side settings in parsed connection string. 
+func (c *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value := o.Get(key); value != "" { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &c.disablePreparedBinaryResult) + if err != nil { + return err + } + err = boolSetting("binary_parameters", &c.binaryParameters) + if err != nil { + return err + } + return nil +} + +func (c *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + _, ok := o["password"] + if ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + user, err := user.Current() + if err != nil { + return + } + filename = filepath.Join(user.HomeDir, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o.Get("host") + ntw, _ := network(o) + port := o.Get("port") + db := o.Get("dbname") + username := o.Get("user") + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } + } +} + +func (c *conn) writeBuf(b byte) *writeBuf { + c.scratch[0] = b + return &writeBuf{ + buf: c.scratch[:5], + pos: 1, + } +} + +func Open(name string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, name) +} + +func DialOpen(d Dialer, name string) (_ driver.Conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. + defer errRecoverNoErrBadConn(&err) + + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o.Set("host", "localhost") + o.Set("port", "5432") + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. 
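+ // With a value of 3, float8 values round-trip exactly through their
+ // text representation; with 2, the final digit of precision can be
+ // lost. We choose the portable value here; a setting given explicitly
+ // in the connection string overrides this default below.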
+ o.Set("extra_float_digits", "2") + for k, v := range parseEnviron(os.Environ()) { + o.Set(k, v) + } + + if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") { + name, err = ParseURL(name) + if err != nil { + return nil, err + } + } + + if err := parseOpts(name, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback := o.Get("fallback_application_name"); fallback != "" { + if !o.Isset("application_name") { + o.Set("application_name", fallback) + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc := o.Get("client_encoding"); enc != "" && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o.Set("client_encoding", "UTF8") + // DateStyle needs a similar treatment. + if datestyle := o.Get("datestyle"); datestyle != "" { + if datestyle != "ISO, MDY" { + panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v", + "ISO, MDY", datestyle)) + } + } else { + o.Set("datestyle", "ISO, MDY") + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. + if o.Get("user") == "" { + u, err := userCurrent() + if err != nil { + return nil, err + } else { + o.Set("user", u) + } + } + + cn := &conn{} + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(d, o) + if err != nil { + return nil, err + } + cn.ssl(o) + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + return cn, err +} + +func dial(d Dialer, o values) (net.Conn, error) { + ntw, addr := network(o) + // SSL is not necessary or supported over UNIX domain sockets + if ntw == "unix" { + o["sslmode"] = "disable" + } + + // Zero or not specified means wait indefinitely. + if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. 
+ deadline := time.Now().Add(duration) + conn, err := d.DialTimeout(ntw, addr, duration) + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + return d.Dial(ntw, addr) +} + +func network(o values) (string, string) { + host := o.Get("host") + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o.Get("port")) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o.Get("port")) +} + +type values map[string]string + +func (vs values) Set(k, v string) { + vs[k] = v +} + +func (vs values) Get(k string) (v string) { + return vs[k] +} + +func (vs values) Isset(k string) bool { + _, ok := vs[k] + return ok +} + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. +// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. 
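+ // For example, the string `user=` sets "user" to the empty string.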
+ o.Set(string(keyRunes), "") + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o.Set(string(keyRunes), string(valRunes)) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.bad = true + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN") + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.bad = true + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.bad = true + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) Commit() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. 
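+ //
+ // An illustrative caller-side view of this behaviour:
+ //
+ //    tx, _ := db.Begin()
+ //    tx.Exec("SELECT 1/0") // server error; the transaction is aborted
+ //    err := tx.Commit()    // rolls back, returns ErrInFailedTransaction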
+ if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.Rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "COMMIT" { + cn.bad = true + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + // ignore any results + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.bad = true + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.bad = true + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. +func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, o := range colTyps { + switch o { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. 
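+ // At present that means bytea and the integer types; everything else
+ // falls back to text. In the mixed case handled below, an (int4, text)
+ // result would, for example, produce colFmtData 00 02 00 01 00 00: a
+ // big-endian int16 count of 2 followed by codes 1 (binary) and 0 (text).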
+ case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + return cn.prepareCopyIn(q) + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. + err = cn.sendSimpleMessage('X') + if err != nil { + return err + } + + return cn.c.Close() +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (_ driver.Rows, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } else { + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + }, nil + } +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } else { + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. 
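+ // An empty name selects PostgreSQL's unnamed prepared statement,
+ // which is planned at Bind time using the actual parameter values.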
+ st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err + } +} + +func (cn *conn) send(m *writeBuf) { + _, err := cn.c.Write(m.wrap()) + if err != nil { + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) { + // sanity check + if m.buf[0] != 0 { + panic("oops") + } + + _, err := cn.c.Write((m.wrap())[1:]) + if err != nil { + panic(err) + } +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.bad = true + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. +func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'E': + panic(parseError(r)) + case 'N': + // ignore + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A', 'N': + // ignore + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. 
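+// Concretely, NotificationResponse ('A') and NoticeResponse ('N') messages
+// are skipped, and ParameterStatus ('S') messages are folded into the
+// connection's parameterStatus by recv1Buf above.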
+func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o.Get("sslmode"); mode { + case "require", "": + tlsConf.InsecureSkipVerify = true + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o.Get("host") + case "disable": + return + default: + errorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + cn.setupSSLClientCertificates(&tlsConf, o) + cn.setupSSLCA(&tlsConf, o) + + w := cn.writeBuf(0) + w.int32(80877103) + cn.sendStartupPacket(w) + + b := cn.scratch[:1] + _, err := io.ReadFull(cn.c, b) + if err != nil { + panic(err) + } + + if b[0] != 'S' { + panic(ErrSSLNotSupported) + } + + client := tls.Client(cn.c, &tlsConf) + if verifyCaOnly { + cn.verifyCA(client, &tlsConf) + } + cn.c = client +} + +// verifyCA carries out a TLS handshake to the server and verifies the +// presented certificate against the effective CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. +func (cn *conn) verifyCA(client *tls.Conn, tlsConf *tls.Config) { + err := client.Handshake() + if err != nil { + panic(err) + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + if err != nil { + panic(err) + } +} + +// This function sets up SSL client certificates based on either the "sslkey" +// and "sslcert" settings (possibly set via the environment variables PGSSLKEY +// and PGSSLCERT, respectively), or if they aren't set, from the .postgresql +// directory in the user's home directory. If the file paths are set +// explicitly, the files must exist. The key file must also not be +// world-readable, or this function will panic with +// ErrSSLKeyHasWorldPermissions. +func (cn *conn) setupSSLClientCertificates(tlsConf *tls.Config, o values) { + var missingOk bool + + sslkey := o.Get("sslkey") + sslcert := o.Get("sslcert") + if sslkey != "" && sslcert != "" { + // If the user has set an sslkey and sslcert, they *must* exist. + missingOk = false + } else { + // Automatically load certificates from ~/.postgresql. + user, err := user.Current() + if err != nil { + // user.Current() might fail when cross-compiling. We have to + // ignore the error and continue without client certificates, since + // we wouldn't know where to load them from. + return + } + + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + missingOk = true + } + + // Check that both files exist, and report the error or stop, depending on + // which behaviour we want. Note that we don't do any more extensive + // checks than this (such as checking that the paths aren't directories); + // LoadX509KeyPair() will take care of the rest. 
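+ //
+ // An illustrative connection string setting both paths explicitly (the
+ // paths are placeholders):
+ //
+ //    sslmode=verify-full sslcert=/path/client.crt sslkey=/path/client.key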
+ keyfinfo, err := os.Stat(sslkey) + if err != nil && missingOk { + return + } else if err != nil { + panic(err) + } + _, err = os.Stat(sslcert) + if err != nil && missingOk { + return + } else if err != nil { + panic(err) + } + + // If we got this far, the key file must also have the correct permissions + kmode := keyfinfo.Mode() + if kmode != kmode&0600 { + panic(ErrSSLKeyHasWorldPermissions) + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + panic(err) + } + tlsConf.Certificates = []tls.Certificate{cert} +} + +// Sets up RootCAs in the TLS configuration if sslrootcert is set. +func (cn *conn) setupSSLCA(tlsConf *tls.Config, o values) { + if sslrootcert := o.Get("sslrootcert"); sslrootcert != "" { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + panic(err) + } + + ok := tlsConf.RootCAs.AppendCertsFromPEM(cert) + if !ok { + errorf("couldn't parse pem in sslrootcert") + } + } +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". + if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + cn.sendStartupPacket(w) + + for { + t, r := cn.recv() + switch t { + case 'K': + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o.Get("password")) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o.Get("password")+o.Get("user"))+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary []byte = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). 
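+// On the wire both values are sequences of big-endian int16s: {0, 1, 0, 1}
+// is a count of one followed by format code 1 (binary), while {0, 0} is a
+// count of zero, which leaves every result column at the text default.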
+var colFmtDataAllText []byte = []byte{0, 0} + +type stmt struct { + cn *conn + name string + colNames []string + colFmts []format + colFmtData []byte + colTyps []oid.Oid + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if st.cn.bad { + return driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.bad = true + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.bad = true + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + colNames: st.colNames, + colTyps: st.colTyps, + colFmts: st.colFmts, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. 
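+ // For example, the tag "INSERT 0 5" splits into three parts and
+ // yields an affected-row count of 5.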
+ if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.bad = true + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.bad = true + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rows struct { + cn *conn + colNames []string + colTyps []oid.Oid + colFmts []format + done bool + rb readBuf +} + +func (rs *rows) Close() error { + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + return nil + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if conn.bad { + return driver.ErrBadConn + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.bad = true + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i], rs.colFmts[i]) + } + return + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. For example: +// +// tblname := "my_table" +// data := "my_data" +// err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", pq.QuoteIdentifier(tblname)), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. +func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. 
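+ // A format of 1 marks a parameter as binary; only []byte arguments
+ // are sent in binary here, everything else keeps the text default of 0.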
+ var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (c *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + var minor int + _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) + if err == nil { + c.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + } + + case "TimeZone": + c.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + c.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (c *conn) processReadyForQuery(r *readBuf) { + c.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.bad = true + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []oid.Oid) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []oid.Oid) { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return nil, nil, nil + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, 
which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.bad = true + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.bad = true + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.bad = true + errorf("unexpected %q after error %s", t, err) + } + // ignore any results + default: + cn.bad = true + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []oid.Oid) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]oid.Oid, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i] = r.oid() + r.next(6) + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []oid.Oid) { + n := r.int16() + colNames = make([]string, n) + colFmts = make([]format, n) + colTyps = make([]oid.Oid, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i] = r.oid() + r.next(6) + colFmts[i] = format(r.int16()) + } + return +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). 
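+		// For example, PGUSER=alice accrues as out["user"] = "alice",
+		// while a set PGHOSTADDR always panics via unsupported().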
+ switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". +func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/conn_test.go b/services/templeton/vendor/src/github.com/lib/pq/conn_test.go new file mode 100644 index 000000000..2639c8efd --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/conn_test.go @@ -0,0 +1,1433 @@ +package pq + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "io" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type Fatalistic interface { + Fatal(args ...interface{}) +} + +func forceBinaryParameters() bool { + bp := os.Getenv("PQTEST_BINARY_PARAMETERS") + if bp == "yes" { + return true + } else if bp == "" || bp == "no" { + return false + } else { + panic("unexpected value for PQTEST_BINARY_PARAMETERS") + } +} + +func openTestConnConninfo(conninfo string) (*sql.DB, error) { + defaultTo := func(envvar string, value string) { + if os.Getenv(envvar) == "" { + os.Setenv(envvar, value) + } + } + defaultTo("PGDATABASE", "pqgotest") + defaultTo("PGSSLMODE", "disable") + defaultTo("PGCONNECT_TIMEOUT", "20") + + if forceBinaryParameters() && + !strings.HasPrefix(conninfo, "postgres://") && + !strings.HasPrefix(conninfo, "postgresql://") { + conninfo = conninfo + " binary_parameters=yes" + } + + return sql.Open("postgres", conninfo) +} + +func openTestConn(t Fatalistic) *sql.DB { + conn, err := openTestConnConninfo("") + if err != nil { + t.Fatal(err) + } + + return conn +} + +func getServerVersion(t *testing.T, db *sql.DB) int { + var version int + err := db.QueryRow("SHOW server_version_num").Scan(&version) + if err != nil { + t.Fatal(err) + } + return version +} + +func TestReconnect(t *testing.T) { + db1 := openTestConn(t) + defer db1.Close() + tx, err := db1.Begin() + if err != nil { + t.Fatal(err) + } + var pid1 int + err = tx.QueryRow("SELECT pg_backend_pid()").Scan(&pid1) + if err != nil { + t.Fatal(err) + } + db2 := openTestConn(t) + defer db2.Close() + _, err = db2.Exec("SELECT pg_terminate_backend($1)", pid1) + if err != nil { + t.Fatal(err) + } + // The rollback will probably "fail" because we just killed + // its connection above + _ = 
tx.Rollback() + + const expected int = 42 + var result int + err = db1.QueryRow(fmt.Sprintf("SELECT %d", expected)).Scan(&result) + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Errorf("got %v; expected %v", result, expected) + } +} + +func TestCommitInFailedTransaction(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + rows, err := txn.Query("SELECT error") + if err == nil { + rows.Close() + t.Fatal("expected failure") + } + err = txn.Commit() + if err != ErrInFailedTransaction { + t.Fatalf("expected ErrInFailedTransaction; got %#v", err) + } +} + +func TestOpenURL(t *testing.T) { + testURL := func(url string) { + db, err := openTestConnConninfo(url) + if err != nil { + t.Fatal(err) + } + defer db.Close() + // database/sql might not call our Open at all unless we do something with + // the connection + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + txn.Rollback() + } + testURL("postgres://") + testURL("postgresql://") +} + +const pgpass_file = "/tmp/pqgotest_pgpass" +func TestPgpass(t *testing.T) { + testAssert := func(conninfo string, expected string, reason string) { + conn, err := openTestConnConninfo(conninfo) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + txn, err := conn.Begin() + if err != nil { + if expected != "fail" { + t.Fatalf(reason, err) + } + return + } + rows, err := txn.Query("SELECT USER") + if err != nil { + txn.Rollback() + rows.Close() + if expected != "fail" { + t.Fatalf(reason, err) + } + } else { + if expected != "ok" { + t.Fatalf(reason, err) + } + } + txn.Rollback() + } + testAssert("", "ok", "missing .pgpass, unexpected error %#v") + os.Setenv("PGPASSFILE", pgpass_file) + testAssert("host=/tmp", "fail", ", unexpected error %#v") + os.Remove(pgpass_file) + pgpass, err := os.OpenFile(pgpass_file, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + t.Fatalf("Unexpected error writing pgpass file %#v", err) + } + _, err = pgpass.WriteString(`# comment +server:5432:some_db:some_user:pass_A +*:5432:some_db:some_user:pass_B +localhost:*:*:*:pass_C +*:*:*:*:pass_fallback +`) + if err != nil { + t.Fatalf("Unexpected error writing pgpass file %#v", err) + } + pgpass.Close() + + assertPassword := func(extra values, expected string) { + o := &values{"host": "localhost", "sslmode": "disable", "connect_timeout": "20", "user": "majid", "port": "5432", "extra_float_digits": "2", "dbname": "pqgotest", "client_encoding": "UTF8", "datestyle": "ISO, MDY"} + for k, v := range extra { + (*o)[k] = v + } + (&conn{}).handlePgpass(*o) + if o.Get("password") != expected { + t.Fatalf("For %v expected %s got %s", extra, expected, o.Get("password")) + } + } + // wrong permissions for the pgpass file means it should be ignored + assertPassword(values{"host": "example.com", "user": "foo"}, "") + // fix the permissions and check if it has taken effect + os.Chmod(pgpass_file, 0600) + assertPassword(values{"host": "server", "dbname": "some_db", "user": "some_user"}, "pass_A") + assertPassword(values{"host": "example.com", "user": "foo"}, "pass_fallback") + assertPassword(values{"host": "example.com", "dbname": "some_db", "user": "some_user"}, "pass_B") + // localhost also matches the default "" and UNIX sockets + assertPassword(values{"host": "", "user": "some_user"}, "pass_C") + assertPassword(values{"host": "/tmp", "user": "some_user"}, "pass_C") + // cleanup + os.Remove(pgpass_file) + os.Setenv("PGPASSFILE", "") +} + +func TestExec(t *testing.T) { + db := openTestConn(t) + 
defer db.Close()
+
+	_, err := db.Exec("CREATE TEMP TABLE temp (a int)")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	r, err := db.Exec("INSERT INTO temp VALUES (1)")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n, _ := r.RowsAffected(); n != 1 {
+		t.Fatalf("expected 1 row affected, not %d", n)
+	}
+
+	r, err = db.Exec("INSERT INTO temp VALUES ($1), ($2), ($3)", 1, 2, 3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n, _ := r.RowsAffected(); n != 3 {
+		t.Fatalf("expected 3 rows affected, not %d", n)
+	}
+
+	// SELECT doesn't send the number of returned rows in the command tag
+	// before 9.0
+	if getServerVersion(t, db) >= 90000 {
+		r, err = db.Exec("SELECT g FROM generate_series(1, 2) g")
+		if err != nil {
+			t.Fatal(err)
+		}
+		if n, _ := r.RowsAffected(); n != 2 {
+			t.Fatalf("expected 2 rows affected, not %d", n)
+		}
+
+		r, err = db.Exec("SELECT g FROM generate_series(1, $1) g", 3)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if n, _ := r.RowsAffected(); n != 3 {
+			t.Fatalf("expected 3 rows affected, not %d", n)
+		}
+	}
+}
+
+func TestStatement(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	st, err := db.Prepare("SELECT 1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	st1, err := db.Prepare("SELECT 2")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	r, err := st.Query()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Close()
+
+	if !r.Next() {
+		t.Fatal("expected row")
+	}
+
+	var i int
+	err = r.Scan(&i)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if i != 1 {
+		t.Fatalf("expected 1, got %d", i)
+	}
+
+	// st1
+
+	r1, err := st1.Query()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r1.Close()
+
+	if !r1.Next() {
+		if r1.Err() != nil {
+			t.Fatal(r1.Err())
+		}
+		t.Fatal("expected row")
+	}
+
+	err = r1.Scan(&i)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if i != 2 {
+		t.Fatalf("expected 2, got %d", i)
+	}
+}
+
+func TestRowsCloseBeforeDone(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	r, err := db.Query("SELECT 1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = r.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if r.Next() {
+		t.Fatal("unexpected row")
+	}
+
+	if r.Err() != nil {
+		t.Fatal(r.Err())
+	}
+}
+
+func TestParameterCountMismatch(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	var notused int
+	err := db.QueryRow("SELECT false", 1).Scan(&notused)
+	if err == nil {
+		t.Fatal("expected err")
+	}
+	// make sure we clean up correctly
+	err = db.QueryRow("SELECT 1").Scan(&notused)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = db.QueryRow("SELECT $1").Scan(&notused)
+	if err == nil {
+		t.Fatal("expected err")
+	}
+	// make sure we clean up correctly
+	err = db.QueryRow("SELECT 1").Scan(&notused)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Test that EmptyQueryResponses are handled correctly.
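+// The server answers an empty query string with EmptyQueryResponse ('I')
+// instead of a RowDescription/CommandComplete pair; both the simple and the
+// extended protocol paths must treat that as zero columns and zero rows.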
+func TestEmptyQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("") + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("") + if err != nil { + t.Fatal(err) + } + cols, err := rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 0 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + + stmt, err := db.Prepare("") + if err != nil { + t.Fatal(err) + } + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Query() + if err != nil { + t.Fatal(err) + } + cols, err = rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 0 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } +} + +// Test that rows.Columns() is correct even if there are no result rows. +func TestEmptyResultSetColumns(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar WHERE FALSE") + if err != nil { + t.Fatal(err) + } + cols, err := rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 2 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + if cols[0] != "a" || cols[1] != "bar" { + t.Fatalf("unexpected Columns result %v", cols) + } + + stmt, err := db.Prepare("SELECT $1::int AS a, text 'bar' AS bar WHERE FALSE") + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Query(1) + if err != nil { + t.Fatal(err) + } + cols, err = rows.Columns() + if err != nil { + t.Fatal(err) + } + if len(cols) != 2 { + t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) + } + if rows.Next() { + t.Fatal("unexpected row") + } + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + if cols[0] != "a" || cols[1] != "bar" { + t.Fatalf("unexpected Columns result %v", cols) + } + +} + +func TestEncodeDecode(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + q := ` + SELECT + E'\\000\\001\\002'::bytea, + 'foobar'::text, + NULL::integer, + '2000-1-1 01:02:03.04-7'::timestamptz, + 0::boolean, + 123, + -321, + 3.14::float8 + WHERE + E'\\000\\001\\002'::bytea = $1 + AND 'foobar'::text = $2 + AND $3::integer is NULL + ` + // AND '2000-1-1 12:00:00.000000-7'::timestamp = $3 + + exp1 := []byte{0, 1, 2} + exp2 := "foobar" + + r, err := db.Query(q, exp1, exp2, nil) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("expected row") + } + + var got1 []byte + var got2 string + var got3 = sql.NullInt64{Valid: true} + var got4 time.Time + var got5, got6, got7, got8 interface{} + + err = r.Scan(&got1, &got2, &got3, &got4, &got5, &got6, &got7, &got8) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(exp1, got1) { + t.Errorf("expected %q byte: %q", exp1, got1) + } + + if !reflect.DeepEqual(exp2, got2) { + t.Errorf("expected %q byte: %q", exp2, got2) + } + + if got3.Valid { + t.Fatal("expected invalid") + } + + if got4.Year() != 2000 { + t.Fatal("wrong year") + } + + if got5 != false { + t.Fatalf("expected false, got %q", got5) + } + + if got6 != int64(123) { + t.Fatalf("expected 123, got %d", got6) + } + + if got7 != int64(-321) { + 
t.Fatalf("expected -321, got %d", got7) + } + + if got8 != float64(3.14) { + t.Fatalf("expected 3.14, got %f", got8) + } +} + +func TestNoData(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + st, err := db.Prepare("SELECT 1 WHERE true = false") + if err != nil { + t.Fatal(err) + } + defer st.Close() + + r, err := st.Query() + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if r.Next() { + if r.Err() != nil { + t.Fatal(r.Err()) + } + t.Fatal("unexpected row") + } + + _, err = db.Query("SELECT * FROM nonexistenttable WHERE age=$1", 20) + if err == nil { + t.Fatal("Should have raised an error on non existent table") + } + + _, err = db.Query("SELECT * FROM nonexistenttable") + if err == nil { + t.Fatal("Should have raised an error on non existent table") + } +} + +func TestErrorDuringStartup(t *testing.T) { + // Don't use the normal connection setup, this is intended to + // blow up in the startup packet from a non-existent user. + db, err := openTestConnConninfo("user=thisuserreallydoesntexist") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + _, err = db.Begin() + if err == nil { + t.Fatal("expected error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "invalid_authorization_specification" && e.Code.Name() != "invalid_password" { + t.Fatalf("expected invalid_authorization_specification or invalid_password, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestBadConn(t *testing.T) { + var err error + + cn := conn{} + func() { + defer cn.errRecover(&err) + panic(io.EOF) + }() + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + if !cn.bad { + t.Fatalf("expected cn.bad") + } + + cn = conn{} + func() { + defer cn.errRecover(&err) + e := &Error{Severity: Efatal} + panic(e) + }() + if err != driver.ErrBadConn { + t.Fatalf("expected driver.ErrBadConn, got: %#v", err) + } + if !cn.bad { + t.Fatalf("expected cn.bad") + } +} + +func TestErrorOnExec(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Exec("INSERT INTO foo VALUES (0), (0)") + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestErrorOnQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + _, err = txn.Query("INSERT INTO foo VALUES (0), (0)") + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +func TestErrorOnQueryRowSimpleQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") + if err != nil { + t.Fatal(err) + } + + var v int + err = txn.QueryRow("INSERT INTO foo 
VALUES (0), (0)").Scan(&v) + if err == nil { + t.Fatal("Should have raised error") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("expected Error, got %#v", err) + } else if e.Code.Name() != "unique_violation" { + t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) + } +} + +// Test the QueryRow bug workarounds in stmt.exec() and simpleQuery() +func TestQueryRowBugWorkaround(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // stmt.exec() + _, err := db.Exec("CREATE TEMP TABLE notnulltemp (a varchar(10) not null)") + if err != nil { + t.Fatal(err) + } + + var a string + err = db.QueryRow("INSERT INTO notnulltemp(a) values($1) RETURNING a", nil).Scan(&a) + if err == sql.ErrNoRows { + t.Fatalf("expected constraint violation error; got: %v", err) + } + pge, ok := err.(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "not_null_violation" { + t.Fatalf("expected not_null_violation; got: %s (%+v)", pge.Code.Name(), err) + } + + // Test workaround in simpleQuery() + tx, err := db.Begin() + if err != nil { + t.Fatalf("unexpected error %s in Begin", err) + } + defer tx.Rollback() + + _, err = tx.Exec("SET LOCAL check_function_bodies TO FALSE") + if err != nil { + t.Fatalf("could not disable check_function_bodies: %s", err) + } + _, err = tx.Exec(` +CREATE OR REPLACE FUNCTION bad_function() +RETURNS integer +-- hack to prevent the function from being inlined +SET check_function_bodies TO TRUE +AS $$ + SELECT text 'bad' +$$ LANGUAGE sql`) + if err != nil { + t.Fatalf("could not create function: %s", err) + } + + err = tx.QueryRow("SELECT * FROM bad_function()").Scan(&a) + if err == nil { + t.Fatalf("expected error") + } + pge, ok = err.(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "invalid_function_definition" { + t.Fatalf("expected invalid_function_definition; got: %s (%+v)", pge.Code.Name(), err) + } + + err = tx.Rollback() + if err != nil { + t.Fatalf("unexpected error %s in Rollback", err) + } + + // Also test that simpleQuery()'s workaround works when the query fails + // after a row has been received. 
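+	// The correlated subquery below yields one row for ss.i = 1 but two rows
+	// for ss.i = 2, so the server raises cardinality_violation only after the
+	// first result row has already been sent.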
+ rows, err := db.Query(` +select + (select generate_series(1, ss.i)) +from (select gs.i + from generate_series(1, 2) gs(i) + order by gs.i limit 2) ss`) + if err != nil { + t.Fatalf("query failed: %s", err) + } + if !rows.Next() { + t.Fatalf("expected at least one result row; got %s", rows.Err()) + } + var i int + err = rows.Scan(&i) + if err != nil { + t.Fatalf("rows.Scan() failed: %s", err) + } + if i != 1 { + t.Fatalf("unexpected value for i: %d", i) + } + if rows.Next() { + t.Fatalf("unexpected row") + } + pge, ok = rows.Err().(*Error) + if !ok { + t.Fatalf("expected *Error; got: %#v", err) + } + if pge.Code.Name() != "cardinality_violation" { + t.Fatalf("expected cardinality_violation; got: %s (%+v)", pge.Code.Name(), rows.Err()) + } +} + +func TestSimpleQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("select 1") + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if !r.Next() { + t.Fatal("expected row") + } +} + +func TestBindError(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("create temp table test (i integer)") + if err != nil { + t.Fatal(err) + } + + _, err = db.Query("select * from test where i=$1", "hhh") + if err == nil { + t.Fatal("expected an error") + } + + // Should not get error here + r, err := db.Query("select * from test where i=$1", 1) + if err != nil { + t.Fatal(err) + } + defer r.Close() +} + +func TestParseErrorInExtendedQuery(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + rows, err := db.Query("PARSE_ERROR $1", 1) + if err == nil { + t.Fatal("expected error") + } + + rows, err = db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// TestReturning tests that an INSERT query using the RETURNING clause returns a row. 
+func TestReturning(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE distributors (did integer default 0, dname text)") + if err != nil { + t.Fatal(err) + } + + rows, err := db.Query("INSERT INTO distributors (did, dname) VALUES (DEFAULT, 'XYZ Widgets') " + + "RETURNING did;") + if err != nil { + t.Fatal(err) + } + if !rows.Next() { + t.Fatal("no rows") + } + var did int + err = rows.Scan(&did) + if err != nil { + t.Fatal(err) + } + if did != 0 { + t.Fatalf("bad value for did: got %d, want %d", did, 0) + } + + if rows.Next() { + t.Fatal("unexpected next row") + } + err = rows.Err() + if err != nil { + t.Fatal(err) + } +} + +func TestIssue186(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + // Exec() a query which returns results + _, err := db.Exec("VALUES (1), (2), (3)") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("VALUES ($1), ($2), ($3)", 1, 2, 3) + if err != nil { + t.Fatal(err) + } + + // Query() a query which doesn't return any results + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + rows, err := txn.Query("CREATE TEMP TABLE foo(f1 int)") + if err != nil { + t.Fatal(err) + } + if err = rows.Close(); err != nil { + t.Fatal(err) + } + + // small trick to get NoData from a parameterized query + _, err = txn.Exec("CREATE RULE nodata AS ON INSERT TO foo DO INSTEAD NOTHING") + if err != nil { + t.Fatal(err) + } + rows, err = txn.Query("INSERT INTO foo VALUES ($1)", 1) + if err != nil { + t.Fatal(err) + } + if err = rows.Close(); err != nil { + t.Fatal(err) + } +} + +func TestIssue196(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + row := db.QueryRow("SELECT float4 '0.10000122' = $1, float8 '35.03554004971999' = $2", + float32(0.10000122), float64(35.03554004971999)) + + var float4match, float8match bool + err := row.Scan(&float4match, &float8match) + if err != nil { + t.Fatal(err) + } + if !float4match { + t.Errorf("Expected float4 fidelity to be maintained; got no match") + } + if !float8match { + t.Errorf("Expected float8 fidelity to be maintained; got no match") + } +} + +// Test that any CommandComplete messages sent before the query results are +// ignored. 
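+// Here the two SET LOCAL statements each produce their own CommandComplete
+// before the SHOW result arrives in the same simple-query round trip.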
+func TestIssue282(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + var search_path string + err := db.QueryRow(` + SET LOCAL search_path TO pg_catalog; + SET LOCAL search_path TO pg_catalog; + SHOW search_path`).Scan(&search_path) + if err != nil { + t.Fatal(err) + } + if search_path != "pg_catalog" { + t.Fatalf("unexpected search_path %s", search_path) + } +} + +func TestReadFloatPrecision(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + row := db.QueryRow("SELECT float4 '0.10000122', float8 '35.03554004971999'") + var float4val float32 + var float8val float64 + err := row.Scan(&float4val, &float8val) + if err != nil { + t.Fatal(err) + } + if float4val != float32(0.10000122) { + t.Errorf("Expected float4 fidelity to be maintained; got no match") + } + if float8val != float64(35.03554004971999) { + t.Errorf("Expected float8 fidelity to be maintained; got no match") + } +} + +func TestXactMultiStmt(t *testing.T) { + // minified test case based on bug reports from + // pico303@gmail.com and rangelspam@gmail.com + t.Skip("Skipping failing test") + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer tx.Commit() + + rows, err := tx.Query("select 1") + if err != nil { + t.Fatal(err) + } + + if rows.Next() { + var val int32 + if err = rows.Scan(&val); err != nil { + t.Fatal(err) + } + } else { + t.Fatal("Expected at least one row in first query in xact") + } + + rows2, err := tx.Query("select 2") + if err != nil { + t.Fatal(err) + } + + if rows2.Next() { + var val2 int32 + if err := rows2.Scan(&val2); err != nil { + t.Fatal(err) + } + } else { + t.Fatal("Expected at least one row in second query in xact") + } + + if err = rows.Err(); err != nil { + t.Fatal(err) + } + + if err = rows2.Err(); err != nil { + t.Fatal(err) + } + + if err = tx.Commit(); err != nil { + t.Fatal(err) + } +} + +var envParseTests = []struct { + Expected map[string]string + Env []string +}{ + { + Env: []string{"PGDATABASE=hello", "PGUSER=goodbye"}, + Expected: map[string]string{"dbname": "hello", "user": "goodbye"}, + }, + { + Env: []string{"PGDATESTYLE=ISO, MDY"}, + Expected: map[string]string{"datestyle": "ISO, MDY"}, + }, + { + Env: []string{"PGCONNECT_TIMEOUT=30"}, + Expected: map[string]string{"connect_timeout": "30"}, + }, +} + +func TestParseEnviron(t *testing.T) { + for i, tt := range envParseTests { + results := parseEnviron(tt.Env) + if !reflect.DeepEqual(tt.Expected, results) { + t.Errorf("%d: Expected: %#v Got: %#v", i, tt.Expected, results) + } + } +} + +func TestParseComplete(t *testing.T) { + tpc := func(commandTag string, command string, affectedRows int64, shouldFail bool) { + defer func() { + if p := recover(); p != nil { + if !shouldFail { + t.Error(p) + } + } + }() + cn := &conn{} + res, c := cn.parseComplete(commandTag) + if c != command { + t.Errorf("Expected %v, got %v", command, c) + } + n, err := res.RowsAffected() + if err != nil { + t.Fatal(err) + } + if n != affectedRows { + t.Errorf("Expected %d, got %d", affectedRows, n) + } + } + + tpc("ALTER TABLE", "ALTER TABLE", 0, false) + tpc("INSERT 0 1", "INSERT", 1, false) + tpc("UPDATE 100", "UPDATE", 100, false) + tpc("SELECT 100", "SELECT", 100, false) + tpc("FETCH 100", "FETCH", 100, false) + // allow COPY (and others) without row count + tpc("COPY", "COPY", 0, false) + // don't fail on command tags we don't recognize + tpc("UNKNOWNCOMMANDTAG", "UNKNOWNCOMMANDTAG", 0, false) + + // failure cases + tpc("INSERT 1", "", 0, true) // missing oid + tpc("UPDATE 0 1", 
"", 0, true) // too many numbers + tpc("SELECT foo", "", 0, true) // invalid row count +} + +func TestExecerInterface(t *testing.T) { + // Gin up a straw man private struct just for the type check + cn := &conn{c: nil} + var cni interface{} = cn + + _, ok := cni.(driver.Execer) + if !ok { + t.Fatal("Driver doesn't implement Execer") + } +} + +func TestNullAfterNonNull(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + r, err := db.Query("SELECT 9::integer UNION SELECT NULL::integer") + if err != nil { + t.Fatal(err) + } + + var n sql.NullInt64 + + if !r.Next() { + if r.Err() != nil { + t.Fatal(err) + } + t.Fatal("expected row") + } + + if err := r.Scan(&n); err != nil { + t.Fatal(err) + } + + if n.Int64 != 9 { + t.Fatalf("expected 2, not %d", n.Int64) + } + + if !r.Next() { + if r.Err() != nil { + t.Fatal(err) + } + t.Fatal("expected row") + } + + if err := r.Scan(&n); err != nil { + t.Fatal(err) + } + + if n.Valid { + t.Fatal("expected n to be invalid") + } + + if n.Int64 != 0 { + t.Fatalf("expected n to 2, not %d", n.Int64) + } +} + +func Test64BitErrorChecking(t *testing.T) { + defer func() { + if err := recover(); err != nil { + t.Fatal("panic due to 0xFFFFFFFF != -1 " + + "when int is 64 bits") + } + }() + + db := openTestConn(t) + defer db.Close() + + r, err := db.Query(`SELECT * +FROM (VALUES (0::integer, NULL::text), (1, 'test string')) AS t;`) + + if err != nil { + t.Fatal(err) + } + + defer r.Close() + + for r.Next() { + } +} + +func TestCommit(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Exec("CREATE TEMP TABLE temp (a int)") + if err != nil { + t.Fatal(err) + } + sqlInsert := "INSERT INTO temp VALUES (1)" + sqlSelect := "SELECT * FROM temp" + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(sqlInsert) + if err != nil { + t.Fatal(err) + } + err = tx.Commit() + if err != nil { + t.Fatal(err) + } + var i int + err = db.QueryRow(sqlSelect).Scan(&i) + if err != nil { + t.Fatal(err) + } + if i != 1 { + t.Fatalf("expected 1, got %d", i) + } +} + +func TestErrorClass(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + _, err := db.Query("SELECT int 'notint'") + if err == nil { + t.Fatal("expected error") + } + pge, ok := err.(*Error) + if !ok { + t.Fatalf("expected *pq.Error, got %#+v", err) + } + if pge.Code.Class() != "22" { + t.Fatalf("expected class 28, got %v", pge.Code.Class()) + } + if pge.Code.Class().Name() != "data_exception" { + t.Fatalf("expected data_exception, got %v", pge.Code.Class().Name()) + } +} + +func TestParseOpts(t *testing.T) { + tests := []struct { + in string + expected values + valid bool + }{ + {"dbname=hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user=goodbye ", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname = hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user =goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"dbname=hello user= goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, + {"host=localhost password='correct horse battery staple'", values{"host": "localhost", "password": "correct horse battery staple"}, true}, + {"dbname=データベース password=パスワード", values{"dbname": "データベース", "password": "パスワード"}, true}, + {"dbname=hello user=''", values{"dbname": "hello", "user": ""}, true}, + {"user='' dbname=hello", values{"dbname": "hello", "user": ""}, true}, + // The last option value is an empty string if there's no non-whitespace after its = + 
{"dbname=hello user= ", values{"dbname": "hello", "user": ""}, true}, + + // The parser ignores spaces after = and interprets the next set of non-whitespace characters as the value. + {"user= password=foo", values{"user": "password=foo"}, true}, + + // Backslash escapes next char + {`user=a\ \'\\b`, values{"user": `a '\b`}, true}, + {`user='a \'b'`, values{"user": `a 'b`}, true}, + + // Incomplete escape + {`user=x\`, values{}, false}, + + // No '=' after the key + {"postgre://marko@internet", values{}, false}, + {"dbname user=goodbye", values{}, false}, + {"user=foo blah", values{}, false}, + {"user=foo blah ", values{}, false}, + + // Unterminated quoted value + {"dbname=hello user='unterminated", values{}, false}, + } + + for _, test := range tests { + o := make(values) + err := parseOpts(test.in, o) + + switch { + case err != nil && test.valid: + t.Errorf("%q got unexpected error: %s", test.in, err) + case err == nil && test.valid && !reflect.DeepEqual(test.expected, o): + t.Errorf("%q got: %#v want: %#v", test.in, o, test.expected) + case err == nil && !test.valid: + t.Errorf("%q expected an error", test.in) + } + } +} + +func TestRuntimeParameters(t *testing.T) { + type RuntimeTestResult int + const ( + ResultUnknown RuntimeTestResult = iota + ResultSuccess + ResultError // other error + ) + + tests := []struct { + conninfo string + param string + expected string + expectedOutcome RuntimeTestResult + }{ + // invalid parameter + {"DOESNOTEXIST=foo", "", "", ResultError}, + // we can only work with a specific value for these two + {"client_encoding=SQL_ASCII", "", "", ResultError}, + {"datestyle='ISO, YDM'", "", "", ResultError}, + // "options" should work exactly as it does in libpq + {"options='-c search_path=pqgotest'", "search_path", "pqgotest", ResultSuccess}, + // pq should override client_encoding in this case + {"options='-c client_encoding=SQL_ASCII'", "client_encoding", "UTF8", ResultSuccess}, + // allow client_encoding to be set explicitly + {"client_encoding=UTF8", "client_encoding", "UTF8", ResultSuccess}, + // test a runtime parameter not supported by libpq + {"work_mem='139kB'", "work_mem", "139kB", ResultSuccess}, + // test fallback_application_name + {"application_name=foo fallback_application_name=bar", "application_name", "foo", ResultSuccess}, + {"application_name='' fallback_application_name=bar", "application_name", "", ResultSuccess}, + {"fallback_application_name=bar", "application_name", "bar", ResultSuccess}, + } + + for _, test := range tests { + db, err := openTestConnConninfo(test.conninfo) + if err != nil { + t.Fatal(err) + } + + // application_name didn't exist before 9.0 + if test.param == "application_name" && getServerVersion(t, db) < 90000 { + db.Close() + continue + } + + tryGetParameterValue := func() (value string, outcome RuntimeTestResult) { + defer db.Close() + row := db.QueryRow("SELECT current_setting($1)", test.param) + err = row.Scan(&value) + if err != nil { + return "", ResultError + } + return value, ResultSuccess + } + + value, outcome := tryGetParameterValue() + if outcome != test.expectedOutcome && outcome == ResultError { + t.Fatalf("%v: unexpected error: %v", test.conninfo, err) + } + if outcome != test.expectedOutcome { + t.Fatalf("unexpected outcome %v (was expecting %v) for conninfo \"%s\"", + outcome, test.expectedOutcome, test.conninfo) + } + if value != test.expected { + t.Fatalf("bad value for %s: got %s, want %s with conninfo \"%s\"", + test.param, value, test.expected, test.conninfo) + } + } +} + +func TestIsUTF8(t 
*testing.T) { + var cases = []struct { + name string + want bool + }{ + {"unicode", true}, + {"utf-8", true}, + {"utf_8", true}, + {"UTF-8", true}, + {"UTF8", true}, + {"utf8", true}, + {"u n ic_ode", true}, + {"ut_f%8", true}, + {"ubf8", false}, + {"punycode", false}, + } + + for _, test := range cases { + if g := isUTF8(test.name); g != test.want { + t.Errorf("isUTF8(%q) = %v want %v", test.name, g, test.want) + } + } +} + +func TestQuoteIdentifier(t *testing.T) { + var cases = []struct { + input string + want string + }{ + {`foo`, `"foo"`}, + {`foo bar baz`, `"foo bar baz"`}, + {`foo"bar`, `"foo""bar"`}, + {"foo\x00bar", `"foo"`}, + {"\x00foo", `""`}, + } + + for _, test := range cases { + got := QuoteIdentifier(test.input) + if got != test.want { + t.Errorf("QuoteIdentifier(%q) = %v want %v", test.input, got, test.want) + } + } +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/copy.go b/services/templeton/vendor/src/github.com/lib/pq/copy.go new file mode 100644 index 000000000..101f11133 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/copy.go @@ -0,0 +1,267 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " ("
+	for i, col := range columns {
+		if i != 0 {
+			stmt += ", "
+		}
+		stmt += QuoteIdentifier(col)
+	}
+	stmt += ") FROM STDIN"
+	return stmt
+}
+
+type copyin struct {
+	cn      *conn
+	buffer  []byte
+	rowData chan []byte
+	done    chan bool
+
+	closed bool
+
+	sync.Mutex // guards err
+	err        error
+}
+
+const ciBufferSize = 64 * 1024
+
+// flush buffer before the buffer is filled up and needs reallocation
+const ciBufferFlushSize = 63 * 1024
+
+func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
+	if !cn.isInTransaction() {
+		return nil, errCopyNotSupportedOutsideTxn
+	}
+
+	ci := &copyin{
+		cn:      cn,
+		buffer:  make([]byte, 0, ciBufferSize),
+		rowData: make(chan []byte),
+		done:    make(chan bool, 1),
+	}
+	// add CopyData identifier + 4 bytes for message length
+	ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
+
+	b := cn.writeBuf('Q')
+	b.string(q)
+	cn.send(b)
+
+awaitCopyInResponse:
+	for {
+		t, r := cn.recv1()
+		switch t {
+		case 'G':
+			if r.byte() != 0 {
+				err = errBinaryCopyNotSupported
+				break awaitCopyInResponse
+			}
+			go ci.resploop()
+			return ci, nil
+		case 'H':
+			err = errCopyToNotSupported
+			break awaitCopyInResponse
+		case 'E':
+			err = parseError(r)
+		case 'Z':
+			if err == nil {
+				cn.bad = true
+				errorf("unexpected ReadyForQuery in response to COPY")
+			}
+			cn.processReadyForQuery(r)
+			return nil, err
+		default:
+			cn.bad = true
+			errorf("unknown response for copy query: %q", t)
+		}
+	}
+
+	// something went wrong, abort COPY before we return
+	b = cn.writeBuf('f')
+	b.string(err.Error())
+	cn.send(b)
+
+	for {
+		t, r := cn.recv1()
+		switch t {
+		case 'c', 'C', 'E':
+		case 'Z':
+			// correctly aborted, we're done
+			cn.processReadyForQuery(r)
+			return nil, err
+		default:
+			cn.bad = true
+			errorf("unknown response for CopyFail: %q", t)
+		}
+	}
+}
+
+func (ci *copyin) flush(buf []byte) {
+	// set message length (without message identifier)
+	binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
+
+	_, err := ci.cn.c.Write(buf)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (ci *copyin) resploop() {
+	for {
+		var r readBuf
+		t, err := ci.cn.recvMessage(&r)
+		if err != nil {
+			ci.cn.bad = true
+			ci.setError(err)
+			ci.done <- true
+			return
+		}
+		switch t {
+		case 'C':
+			// complete
+		case 'N':
+			// NoticeResponse
+		case 'Z':
+			ci.cn.processReadyForQuery(&r)
+			ci.done <- true
+			return
+		case 'E':
+			err := parseError(&r)
+			ci.setError(err)
+		default:
+			ci.cn.bad = true
+			ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
+			ci.done <- true
+			return
+		}
+	}
+}
+
+func (ci *copyin) isErrorSet() bool {
+	ci.Lock()
+	isSet := (ci.err != nil)
+	ci.Unlock()
+	return isSet
+}
+
+// setError() sets ci.err if one has not been set already. Caller must not be
+// holding ci.Mutex.
+func (ci *copyin) setError(err error) {
+	ci.Lock()
+	if ci.err == nil {
+		ci.err = err
+	}
+	ci.Unlock()
+}
+
+func (ci *copyin) NumInput() int {
+	return -1
+}
+
+func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
+	return nil, ErrNotSupported
+}
+
+// Exec inserts values into the COPY stream. The insert is asynchronous
+// and Exec can return errors from previous Exec calls to the same
+// COPY stmt.
+//
+// You need to call Exec(nil) to sync the COPY stream and to get any
+// errors from pending data, since Stmt.Close() doesn't return errors
+// to the user.
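+//
+// A minimal sketch (hedged; "stmt" is assumed to have been prepared with
+// pq.CopyIn inside a transaction, and "records" is a hypothetical slice):
+//
+//	for _, rec := range records {
+//		if _, err := stmt.Exec(rec.ID, rec.Name); err != nil {
+//			return err
+//		}
+//	}
+//	if _, err := stmt.Exec(); err != nil { // flush and surface buffered errors
+//		return err
+//	}
+//	return stmt.Close()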
+func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if ci.cn.bad { + return nil, driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if ci.isErrorSet() { + return nil, ci.err + } + + if len(v) == 0 { + return nil, ci.Close() + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if ci.cn.bad { + return driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. + err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + + if ci.isErrorSet() { + err = ci.err + return err + } + return nil +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/copy_test.go b/services/templeton/vendor/src/github.com/lib/pq/copy_test.go new file mode 100644 index 000000000..86745b38f --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/copy_test.go @@ -0,0 +1,465 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "strings" + "testing" +) + +func TestCopyInStmt(t *testing.T) { + var stmt string + stmt = CopyIn("table name") + if stmt != `COPY "table name" () FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyIn("table name", "column 1", "column 2") + if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyIn(`table " name """`, `co"lumn""`) + if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` { + t.Fatal(stmt) + } +} + +func TestCopyInSchemaStmt(t *testing.T) { + var stmt string + stmt = CopyInSchema("schema name", "table name") + if stmt != `COPY "schema name"."table name" () FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyInSchema("schema name", "table name", "column 1", "column 2") + if stmt != `COPY "schema name"."table name" ("column 1", "column 2") FROM STDIN` { + t.Fatal(stmt) + } + + stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`) + if stmt != `COPY "schema "" name """"""".`+ + `"table "" name """"""" ("co""lumn""""") FROM STDIN` { + t.Fatal(stmt) + } +} + +func TestCopyInMultipleValues(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + t.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + t.Fatal(err) + } + + longString := strings.Repeat("#", 500) + + for i := 0; i < 500; i++ { + _, err = stmt.Exec(int64(i), longString) + if err != nil { + t.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + t.Fatal(err) + } + + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + t.Fatal(err) + } + + if num != 500 { + t.Fatalf("expected 500 items, not %d", num) + } +} + +func TestCopyInRaiseStmtTrigger(t 
*testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	if getServerVersion(t, db) < 90000 {
+		var exists int
+		err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists)
+		if err == sql.ErrNoRows {
+			t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger")
+		} else if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	txn, err := db.Begin()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer txn.Rollback()
+
+	_, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = txn.Exec(`
+			CREATE OR REPLACE FUNCTION pg_temp.temptest()
+			RETURNS trigger AS
+			$BODY$ begin
+				raise notice 'Hello world';
+				return new;
+			end $BODY$
+			LANGUAGE plpgsql`)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = txn.Exec(`
+			CREATE TRIGGER temptest_trigger
+			BEFORE INSERT
+			ON temp
+			FOR EACH ROW
+			EXECUTE PROCEDURE pg_temp.temptest()`)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	longString := strings.Repeat("#", 500)
+
+	_, err = stmt.Exec(int64(1), longString)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = stmt.Exec()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = stmt.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var num int
+	err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if num != 1 {
+		t.Fatalf("expected 1 item, not %d", num)
+	}
+}
+
+func TestCopyInTypes(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	txn, err := db.Begin()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer txn.Rollback()
+
+	_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = stmt.Exec()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = stmt.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var num int
+	var text string
+	var blob []byte
+	var nothing sql.NullString
+
+	err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, &nothing)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if num != 1234567890 {
+		t.Fatal("unexpected result", num)
+	}
+	if text != "Héllö\n ☃!\r\t\\" {
+		t.Fatal("unexpected result", text)
+	}
+	if bytes.Compare(blob, []byte{0, 255, 9, 10, 13}) != 0 {
+		t.Fatal("unexpected result", blob)
+	}
+	if nothing.Valid {
+		t.Fatal("unexpected result", nothing.String)
+	}
+}
+
+func TestCopyInWrongType(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	txn, err := db.Begin()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer txn.Rollback()
+
+	_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stmt, err := txn.Prepare(CopyIn("temp", "num"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer stmt.Close()
+
+	_, err = stmt.Exec("Héllö\n ☃!\r\t\\")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = stmt.Exec()
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" {
+		t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge)
+	}
+}
+
+func TestCopyOutsideOfTxnError(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	_, err := db.Prepare(CopyIn("temp", "num"))
+	if err == nil {
+		
t.Fatal("COPY outside of transaction did not return an error")
+	}
+	if err != errCopyNotSupportedOutsideTxn {
+		t.Fatalf("expected %v, got %v", errCopyNotSupportedOutsideTxn, err)
+	}
+}
+
+func TestCopyInBinaryError(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	txn, err := db.Begin()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer txn.Rollback()
+
+	_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary")
+	if err != errBinaryCopyNotSupported {
+		t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err)
+	}
+	// check that the protocol is in a valid state
+	err = txn.Rollback()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestCopyFromError(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	txn, err := db.Begin()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer txn.Rollback()
+
+	_, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = txn.Prepare("COPY temp (num) TO STDOUT")
+	if err != errCopyToNotSupported {
+		t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err)
+	}
+	// check that the protocol is in a valid state
+	err = txn.Rollback()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestCopySyntaxError(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	txn, err := db.Begin()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer txn.Rollback()
+
+	_, err = txn.Prepare("COPY ")
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if pge := err.(*Error); pge.Code.Name() != "syntax_error" {
+		t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge)
+	}
+	// check that the protocol is in a valid state
+	err = txn.Rollback()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Tests for connection errors in copyin.resploop()
+func TestCopyRespLoopConnectionError(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	txn, err := db.Begin()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer txn.Rollback()
+
+	var pid int
+	err = txn.QueryRow("SELECT pg_backend_pid()").Scan(&pid)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = txn.Exec("CREATE TEMP TABLE temp (a int)")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stmt, err := txn.Prepare(CopyIn("temp", "a"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer stmt.Close()
+
+	_, err = db.Exec("SELECT pg_terminate_backend($1)", pid)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if getServerVersion(t, db) < 90500 {
+		// We have to try and send something over, since postgres before
+		// version 9.5 won't process SIGTERMs while it's waiting for
+		// CopyData/CopyEnd messages; see tcop/postgres.c.
+ _, err = stmt.Exec(1) + if err != nil { + t.Fatal(err) + } + } + _, err = stmt.Exec() + if err == nil { + t.Fatalf("expected error") + } + pge, ok := err.(*Error) + if !ok { + if err == driver.ErrBadConn { + // likely an EPIPE + } else { + t.Fatalf("expected *pq.Error or driver.ErrBadConn, got %+#v", err) + } + } else if pge.Code.Name() != "admin_shutdown" { + t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name()) + } + + _ = stmt.Close() +} + +func BenchmarkCopyIn(b *testing.B) { + db := openTestConn(b) + defer db.Close() + + txn, err := db.Begin() + if err != nil { + b.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") + if err != nil { + b.Fatal(err) + } + + stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + _, err = stmt.Exec(int64(i), "hello world!") + if err != nil { + b.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + b.Fatal(err) + } + + err = stmt.Close() + if err != nil { + b.Fatal(err) + } + + var num int + err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) + if err != nil { + b.Fatal(err) + } + + if num != b.N { + b.Fatalf("expected %d items, not %d", b.N, num) + } +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/doc.go b/services/templeton/vendor/src/github.com/lib/pq/doc.go new file mode 100644 index 000000000..19798dfc9 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/doc.go @@ -0,0 +1,212 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + db, err := sql.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full") + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. 
+ * sslrootcert - The location of the root certificate file. The file must contain PEM encoded data.
+
+Valid values for sslmode are:
+
+ * disable - No SSL
+ * require - Always SSL (skip verification)
+ * verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA)
+ * verify-full - Always SSL (verify that the certificate presented by the server was signed by a trusted CA and the server host name matches the one in the certificate)
+
+See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
+for more information about connection string parameters.
+
+Use single quotes for values that contain whitespace:
+
+ "user=pqgotest password='with spaces'"
+
+A backslash will escape the next character in values:
+
+ "user=space\ man password='it\'s valid'"
+
+Note that the connection parameter client_encoding (which sets the
+text encoding for the connection) may be set but must be "UTF8",
+matching with the same rules as Postgres. It is an error to provide
+any other value.
+
+In addition to the parameters listed above, any run-time parameter that can be
+set at backend start time can be set in the connection string. For more
+information, see
+http://www.postgresql.org/docs/current/static/runtime-config.html.
+
+Most environment variables supported by libpq, as specified at
+http://www.postgresql.org/docs/current/static/libpq-envars.html, are also
+supported by pq. If any of the environment
+variables not supported by pq are set, pq will panic during connection
+establishment. Environment variables have a lower precedence than explicitly
+provided connection parameters.
+
+The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
+is supported, but on Windows PGPASSFILE must be specified explicitly.
+
+Queries
+
+database/sql does not dictate any specific format for parameter
+markers in query strings, and pq uses the Postgres-native ordinal markers,
+as shown above. The same marker can be reused for the same parameter:
+
+ rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
+ OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
+
+pq does not support the LastInsertId() method of the Result type in database/sql.
+To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
+RETURNING clause with a standard Query or QueryRow call:
+
+ var userid int
+ err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
+ VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
+
+For more details on RETURNING, see the Postgres documentation:
+
+ http://www.postgresql.org/docs/current/static/sql-insert.html
+ http://www.postgresql.org/docs/current/static/sql-update.html
+ http://www.postgresql.org/docs/current/static/sql-delete.html
+
+For additional instructions on querying see the documentation for the database/sql package.
+
+Errors
+
+pq may return errors of type *pq.Error which can be interrogated for error details:
+
+ if err, ok := err.(*pq.Error); ok {
+ fmt.Println("pq error:", err.Code.Name())
+ }
+
+See the pq.Error type for details.
+
+
+Bulk imports
+
+You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
+pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
+handle can then be repeatedly "executed" to copy data into the target table.
+After all data has been processed you should call Exec() once with no arguments
+to flush all buffered data.
Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. + +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +http://godoc.org/github.com/lib/pq/listen_example. 
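+
+As a rough sketch of that flow (the connection string conninfo, the channel
+name "jobs", and the reconnect intervals below are placeholders, and error
+handling is elided):
+
+ listener := pq.NewListener(conninfo, 10*time.Second, time.Minute, nil)
+ if err := listener.Listen("jobs"); err != nil {
+ log.Fatal(err)
+ }
+ for n := range listener.Notify {
+ if n == nil {
+ // a nil notification is sent after the connection is re-established
+ continue
+ }
+ fmt.Println("notification on", n.Channel, "payload:", n.Extra)
+ }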
+
+*/
+package pq
diff --git a/services/templeton/vendor/src/github.com/lib/pq/encode.go b/services/templeton/vendor/src/github.com/lib/pq/encode.go
new file mode 100644
index 000000000..9fa90f5c9
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/lib/pq/encode.go
@@ -0,0 +1,538 @@
+package pq
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/lib/pq/oid"
+)
+
+func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
+ switch v := x.(type) {
+ case []byte:
+ return v
+ default:
+ return encode(parameterStatus, x, oid.T_unknown)
+ }
+ panic("not reached")
+}
+
+func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
+ switch v := x.(type) {
+ case int64:
+ return strconv.AppendInt(nil, v, 10)
+ case float64:
+ return strconv.AppendFloat(nil, v, 'f', -1, 64)
+ case []byte:
+ if pgtypOid == oid.T_bytea {
+ return encodeBytea(parameterStatus.serverVersion, v)
+ }
+
+ return v
+ case string:
+ if pgtypOid == oid.T_bytea {
+ return encodeBytea(parameterStatus.serverVersion, []byte(v))
+ }
+
+ return []byte(v)
+ case bool:
+ return strconv.AppendBool(nil, v)
+ case time.Time:
+ return formatTs(v)
+
+ default:
+ errorf("encode: unknown type for %T", v)
+ }
+
+ panic("not reached")
+}
+
+func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
+ if f == formatBinary {
+ return binaryDecode(parameterStatus, s, typ)
+ } else {
+ return textDecode(parameterStatus, s, typ)
+ }
+}
+
+func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
+ switch typ {
+ case oid.T_bytea:
+ return s
+ case oid.T_int8:
+ return int64(binary.BigEndian.Uint64(s))
+ case oid.T_int4:
+ return int64(int32(binary.BigEndian.Uint32(s)))
+ case oid.T_int2:
+ return int64(int16(binary.BigEndian.Uint16(s)))
+
+ default:
+ errorf("don't know how to decode binary parameter of type %d", uint32(typ))
+ }
+
+ panic("not reached")
+}
+
+func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
+ switch typ {
+ case oid.T_bytea:
+ return parseBytea(s)
+ case oid.T_timestamptz:
+ return parseTs(parameterStatus.currentLocation, string(s))
+ case oid.T_timestamp, oid.T_date:
+ return parseTs(nil, string(s))
+ case oid.T_time:
+ return mustParse("15:04:05", typ, s)
+ case oid.T_timetz:
+ return mustParse("15:04:05-07", typ, s)
+ case oid.T_bool:
+ return s[0] == 't'
+ case oid.T_int8, oid.T_int4, oid.T_int2:
+ i, err := strconv.ParseInt(string(s), 10, 64)
+ if err != nil {
+ errorf("%s", err)
+ }
+ return i
+ case oid.T_float4, oid.T_float8:
+ bits := 64
+ if typ == oid.T_float4 {
+ bits = 32
+ }
+ f, err := strconv.ParseFloat(string(s), bits)
+ if err != nil {
+ errorf("%s", err)
+ }
+ return f
+ }
+
+ return s
+}
+
+// appendEncodedText encodes item in text format as required by COPY
+// and appends to buf
+func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
+ switch v := x.(type) {
+ case int64:
+ return strconv.AppendInt(buf, v, 10)
+ case float64:
+ return strconv.AppendFloat(buf, v, 'f', -1, 64)
+ case []byte:
+ encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
+ return appendEscapedText(buf, string(encodedBytea))
+ case string:
+ return appendEscapedText(buf, v)
+ case bool:
+ return strconv.AppendBool(buf, v)
+ case time.Time:
+ return append(buf, formatTs(v)...)
+ case nil:
+ return append(buf, "\\N"...)
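+ // (\N is the COPY text-format spelling of SQL NULL.)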
+ default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // check for a 30-minute-offset timezone + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + return t +} + +func expect(str, char string, pos int) { + if c := str[pos : pos+1]; c != char { + errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func mustAtoi(str string) int { + result, err := strconv.Atoi(str) + if err != nil { + errorf("expected number; got '%v'", str) + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache *locationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. +func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +/* + * If EnableInfinityTs is not called, "-infinity" and "infinity" will return + * []byte("-infinity") and []byte("infinity") respectively, and potentially + * cause error "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time", + * when scanning into a time.Time value. + * + * Once EnableInfinityTs has been called, all connections created using this + * driver will decode Postgres' "-infinity" and "infinity" for "timestamp", + * "timestamp with time zone" and "date" types to the predefined minimum and + * maximum times, respectively. When encoding time.Time values, any time which + * equals or precedes the predefined minimum time will be encoded to + * "-infinity". 
Any values at or past the maximum time will similarly be + * encoded to "infinity". + * + * + * If EnableInfinityTs is called with negative >= positive, it will panic. + * Calling EnableInfinityTs after a connection has been established results in + * undefined behavior. If EnableInfinityTs is called more than once, it will + * panic. + */ +func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := mustAtoi(str[:monSep]) + daySep := monSep + 3 + month := mustAtoi(str[monSep+1 : daySep]) + expect(str, "-", daySep) + timeSep := daySep + 3 + day := mustAtoi(str[daySep+1 : timeSep]) + + var hour, minute, second int + if len(str) > monSep+len("01-01")+1 { + expect(str, " ", timeSep) + minSep := timeSep + 3 + expect(str, ":", minSep) + hour = mustAtoi(str[timeSep+1 : minSep]) + secSep := minSep + 3 + expect(str, ":", secSep) + minute = mustAtoi(str[minSep+1 : secSep]) + secEnd := secSep + 3 + second = mustAtoi(str[secSep+1 : secEnd]) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx:remainderIdx+1] == "." 
{ + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := mustAtoi(str[fracStart : fracStart+fracOff]) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart:tzStart+1] == "-" || str[tzStart:tzStart+1] == "+") { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + if c := str[tzStart : tzStart+1]; c == "-" { + tzSign = -1 + } else if c == "+" { + tzSign = +1 + } else { + errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := mustAtoi(str[tzStart+1 : tzStart+3]) + remainderIdx += 3 + var tzMin, tzSec int + if tzStart+3 < len(str) && str[tzStart+3:tzStart+4] == ":" { + tzMin = mustAtoi(str[tzStart+4 : tzStart+6]) + remainderIdx += 3 + } + if tzStart+6 < len(str) && str[tzStart+6:tzStart+7] == ":" { + tzSec = mustAtoi(str[tzStart+7 : tzStart+9]) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + if remainderIdx < len(str) && str[remainderIdx:remainderIdx+3] == " BC" { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) (b []byte) { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b = []byte(t.Format(time.RFC3339Nano)) + + _, offset := t.Zone() + offset = offset % 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. 
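+// In the hex format the wire value looks like "\x6f6b"; in the escape format,
+// non-printable bytes arrive as octal escapes such as "\001" and a literal
+// backslash arrives as "\\".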
+func parseBytea(s []byte) (result []byte) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + errorf("%s", err) + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + if err != nil { + errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
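+// When nt.Valid is false it returns nil, which the driver sends to the
+// server as SQL NULL.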
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/encode_test.go b/services/templeton/vendor/src/github.com/lib/pq/encode_test.go new file mode 100644 index 000000000..146a04f4a --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/encode_test.go @@ -0,0 +1,720 @@ +package pq + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + "testing" + "time" + + "github.com/lib/pq/oid" +) + +func TestScanTimestamp(t *testing.T) { + var nt NullTime + tn := time.Now() + nt.Scan(tn) + if !nt.Valid { + t.Errorf("Expected Valid=false") + } + if nt.Time != tn { + t.Errorf("Time value mismatch") + } +} + +func TestScanNilTimestamp(t *testing.T) { + var nt NullTime + nt.Scan(nil) + if nt.Valid { + t.Errorf("Expected Valid=false") + } +} + +var timeTests = []struct { + str string + timeval time.Time +}{ + {"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, + {"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))}, + {"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, + time.FixedZone("", -7*60*60))}, + {"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -7*60*60))}, + {"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -(7*60*60+42*60)))}, + {"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", -(7*60*60+30*60+9)))}, + {"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0, + time.FixedZone("", 7*60*60))}, + {"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))}, + {"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, + time.FixedZone("", -7*60*60))}, + {"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 
0)}, + {"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)}, + {"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, + {"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, + {"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, +} + +// Helper function for the two tests below +func tryParse(str string) (t time.Time, err error) { + defer func() { + if p := recover(); p != nil { + err = fmt.Errorf("%v", p) + return + } + }() + i := parseTs(nil, str) + t, ok := i.(time.Time) + if !ok { + err = fmt.Errorf("Not a time.Time type, got %#v", i) + } + return +} + +// Test that parsing the string results in the expected value. +func TestParseTs(t *testing.T) { + for i, tt := range timeTests { + val, err := tryParse(tt.str) + if err != nil { + t.Errorf("%d: got error: %v", i, err) + } else if val.String() != tt.timeval.String() { + t.Errorf("%d: expected to parse %q into %q; got %q", + i, tt.str, tt.timeval, val) + } + } +} + +// Now test that sending the value into the database and parsing it back +// returns the same time.Time value. +func TestEncodeAndParseTs(t *testing.T) { + db, err := openTestConnConninfo("timezone='Etc/UTC'") + if err != nil { + t.Fatal(err) + } + defer db.Close() + + for i, tt := range timeTests { + var dbstr string + err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr) + if err != nil { + t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err) + continue + } + + val, err := tryParse(dbstr) + if err != nil { + t.Errorf("%d: could not parse value %q: %s", i, dbstr, err) + continue + } + val = val.In(tt.timeval.Location()) + if val.String() != tt.timeval.String() { + t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val) + } + } +} + +var formatTimeTests = []struct { + time time.Time + expected string +}{ + {time.Time{}, "0001-01-01T00:00:00Z"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03T04:05:06.123456789Z"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03T04:05:06.123456789+02:00"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03T04:05:06.123456789-06:00"}, + {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03T04:05:06-07:30:09"}, + + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z"}, + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00"}, + {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00"}, + + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z BC"}, + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00 BC"}, + {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00 BC"}, + + {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), 
"0001-02-03T04:05:06-07:30:09"}, + {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09 BC"}, +} + +func TestFormatTs(t *testing.T) { + for i, tt := range formatTimeTests { + val := string(formatTs(tt.time)) + if val != tt.expected { + t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected) + } + } +} + +func TestTimestampWithTimeZone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + + // try several different locations, all included in Go's zoneinfo.zip + for _, locName := range []string{ + "UTC", + "America/Chicago", + "America/New_York", + "Australia/Darwin", + "Australia/Perth", + } { + loc, err := time.LoadLocation(locName) + if err != nil { + t.Logf("Could not load time zone %s - skipping", locName) + continue + } + + // Postgres timestamps have a resolution of 1 microsecond, so don't + // use the full range of the Nanosecond argument + refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc) + + for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} { + // Switch Postgres's timezone to test different output timestamp formats + _, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone)) + if err != nil { + t.Fatal(err) + } + + var gotTime time.Time + row := tx.QueryRow("select $1::timestamp with time zone", refTime) + err = row.Scan(&gotTime) + if err != nil { + t.Fatal(err) + } + + if !refTime.Equal(gotTime) { + t.Errorf("timestamps not equal: %s != %s", refTime, gotTime) + } + + // check that the time zone is set correctly based on TimeZone + pgLoc, err := time.LoadLocation(pgTimeZone) + if err != nil { + t.Logf("Could not load time zone %s - skipping", pgLoc) + continue + } + translated := refTime.In(pgLoc) + if translated.String() != gotTime.String() { + t.Errorf("timestamps not equal: %s != %s", translated, gotTime) + } + } + } +} + +func TestTimestampWithOutTimezone(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + test := func(ts, pgts string) { + r, err := db.Query("SELECT $1::timestamp", pgts) + if err != nil { + t.Fatalf("Could not run query: %v", err) + } + + n := r.Next() + + if n != true { + t.Fatal("Expected at least one row") + } + + var result time.Time + err = r.Scan(&result) + if err != nil { + t.Fatalf("Did not expect error scanning row: %v", err) + } + + expected, err := time.Parse(time.RFC3339, ts) + if err != nil { + t.Fatalf("Could not parse test time literal: %v", err) + } + + if !result.Equal(expected) { + t.Fatalf("Expected time to match %v: got mismatch %v", + expected, result) + } + + n = r.Next() + if n != false { + t.Fatal("Expected only one row") + } + } + + test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00") + + // Test higher precision time + test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033") +} + +func TestInfinityTimestamp(t *testing.T) { + db := openTestConn(t) + defer db.Close() + var err error + var resultT time.Time + + expectedErrorStrPrefix := `sql: Scan error on column index 0: unsupported` + type testCases []struct { + Query string + Param string + ExpectedErrStrPrefix string + ExpectedVal interface{} + } + tc := testCases{ + {"SELECT $1::timestamp", "-infinity", expectedErrorStrPrefix, "-infinity"}, + {"SELECT $1::timestamptz", "-infinity", expectedErrorStrPrefix, "-infinity"}, + {"SELECT $1::timestamp", "infinity", expectedErrorStrPrefix, "infinity"}, + {"SELECT $1::timestamptz", "infinity", expectedErrorStrPrefix, 
"infinity"}, + } + // try to assert []byte to time.Time + for _, q := range tc { + err = db.QueryRow(q.Query, q.Param).Scan(&resultT) + if !strings.HasPrefix(err.Error(), q.ExpectedErrStrPrefix) { + t.Errorf("Scanning -/+infinity, expected error to have prefix %q, got %q", q.ExpectedErrStrPrefix, err) + } + } + // yield []byte + for _, q := range tc { + var resultI interface{} + err = db.QueryRow(q.Query, q.Param).Scan(&resultI) + if err != nil { + t.Errorf("Scanning -/+infinity, expected no error, got %q", err) + } + result, ok := resultI.([]byte) + if !ok { + t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI) + } + if string(result) != q.ExpectedVal { + t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result) + } + } + + y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC) + y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC) + EnableInfinityTs(y1500, y2500) + + err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning infinity, expected no error, got %q", err) + } + if !resultT.Equal(y2500) { + t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT) + } + + err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning infinity, expected no error, got %q", err) + } + if !resultT.Equal(y2500) { + t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String()) + } + + err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning -infinity, expected no error, got %q", err) + } + if !resultT.Equal(y1500) { + t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) + } + + err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT) + if err != nil { + t.Errorf("Scanning -infinity, expected no error, got %q", err) + } + if !resultT.Equal(y1500) { + t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) + } + + y_1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC) + y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC) + var s string + err = db.QueryRow("SELECT $1::timestamp::text", y_1500).Scan(&s) + if err != nil { + t.Errorf("Encoding -infinity, expected no error, got %q", err) + } + if s != "-infinity" { + t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) + } + err = db.QueryRow("SELECT $1::timestamptz::text", y_1500).Scan(&s) + if err != nil { + t.Errorf("Encoding -infinity, expected no error, got %q", err) + } + if s != "-infinity" { + t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) + } + + err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s) + if err != nil { + t.Errorf("Encoding infinity, expected no error, got %q", err) + } + if s != "infinity" { + t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) + } + err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s) + if err != nil { + t.Errorf("Encoding infinity, expected no error, got %q", err) + } + if s != "infinity" { + t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) + } + + disableInfinityTs() + + var panicErrorString string + func() { + defer func() { + panicErrorString, _ = recover().(string) + }() + EnableInfinityTs(y2500, y1500) + }() + if panicErrorString != infinityTsNegativeMustBeSmaller { + t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString) + } +} + +func TestStringWithNul(t 
*testing.T) { + db := openTestConn(t) + defer db.Close() + + hello0world := string("hello\x00world") + _, err := db.Query("SELECT $1::text", &hello0world) + if err == nil { + t.Fatal("Postgres accepts a string with nul in it; " + + "injection attacks may be plausible") + } +} + +func TestByteSliceToText(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte("hello world") + row := db.QueryRow("SELECT $1::text", b) + + var result []byte + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if string(result) != string(b) { + t.Fatalf("expected %v but got %v", b, result) + } +} + +func TestStringToBytea(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := "hello world" + row := db.QueryRow("SELECT $1::bytea", b) + + var result []byte + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(result, []byte(b)) { + t.Fatalf("expected %v but got %v", b, result) + } +} + +func TestTextByteSliceToUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11") + row := db.QueryRow("SELECT $1::uuid", b) + + var result string + err := row.Scan(&result) + if forceBinaryParameters() { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22P03" { + t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code) + } + } else { + if err != nil { + t.Fatal(err) + } + + if result != string(b) { + t.Fatalf("expected %v but got %v", b, result) + } + } +} + +func TestBinaryByteSlicetoUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + b := []byte{'\xa0', '\xee', '\xbc', '\x99', + '\x9c', '\x0b', + '\x4e', '\xf8', + '\xbb', '\x00', '\x6b', + '\xb9', '\xbd', '\x38', '\x0a', '\x11'} + row := db.QueryRow("SELECT $1::uuid", b) + + var result string + err := row.Scan(&result) + if forceBinaryParameters() { + if err != nil { + t.Fatal(err) + } + + if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") { + t.Fatalf("expected %v but got %v", b, result) + } + } else { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22021" { + t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code) + } + } +} + +func TestStringToUUID(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11" + row := db.QueryRow("SELECT $1::uuid", s) + + var result string + err := row.Scan(&result) + if err != nil { + t.Fatal(err) + } + + if result != s { + t.Fatalf("expected %v but got %v", s, result) + } +} + +func TestTextByteSliceToInt(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + expected := 12345678 + b := []byte(fmt.Sprintf("%d", expected)) + row := db.QueryRow("SELECT $1::int", b) + + var result int + err := row.Scan(&result) + if forceBinaryParameters() { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22P03" { + t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code) + } + } else { + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Fatalf("expected %v but got %v", expected, result) + } + } +} + +func TestBinaryByteSliceToInt(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + expected := 12345678 + b := []byte{'\x00', '\xbc', '\x61', '\x4e'} + row := db.QueryRow("SELECT $1::int", b) + + var result int + err := row.Scan(&result) + if 
forceBinaryParameters() { + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Fatalf("expected %v but got %v", expected, result) + } + } else { + pqErr := err.(*Error) + if pqErr == nil { + t.Errorf("Expected to get error") + } else if pqErr.Code != "22021" { + t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code) + } + } +} + +func TestByteaOutputFormatEncoding(t *testing.T) { + input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123") + want := []byte("\\x5c78000102fffe6162636465666730313233") + got := encode(¶meterStatus{serverVersion: 90000}, input, oid.T_bytea) + if !bytes.Equal(want, got) { + t.Errorf("invalid hex bytea output, got %v but expected %v", got, want) + } + + want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123") + got = encode(¶meterStatus{serverVersion: 84000}, input, oid.T_bytea) + if !bytes.Equal(want, got) { + t.Errorf("invalid escape bytea output, got %v but expected %v", got, want) + } +} + +func TestByteaOutputFormats(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if getServerVersion(t, db) < 90000 { + // skip + return + } + + testByteaOutputFormat := func(f string, usePrepared bool) { + expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08") + sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')" + + var data []byte + + // use a txn to avoid relying on getting the same connection + txn, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer txn.Rollback() + + _, err = txn.Exec("SET LOCAL bytea_output TO " + f) + if err != nil { + t.Fatal(err) + } + var rows *sql.Rows + var stmt *sql.Stmt + if usePrepared { + stmt, err = txn.Prepare(sqlQuery) + if err != nil { + t.Fatal(err) + } + rows, err = stmt.Query() + } else { + // use Query; QueryRow would hide the actual error + rows, err = txn.Query(sqlQuery) + } + if err != nil { + t.Fatal(err) + } + if !rows.Next() { + if rows.Err() != nil { + t.Fatal(rows.Err()) + } + t.Fatal("shouldn't happen") + } + err = rows.Scan(&data) + if err != nil { + t.Fatal(err) + } + err = rows.Close() + if err != nil { + t.Fatal(err) + } + if stmt != nil { + err = stmt.Close() + if err != nil { + t.Fatal(err) + } + } + if !bytes.Equal(data, expectedData) { + t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData) + } + } + + testByteaOutputFormat("hex", false) + testByteaOutputFormat("escape", false) + testByteaOutputFormat("hex", true) + testByteaOutputFormat("escape", true) +} + +func TestAppendEncodedText(t *testing.T) { + var buf []byte + + buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, int64(10)) + buf = append(buf, '\t') + buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, 42.0000000001) + buf = append(buf, '\t') + buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, "hello\tworld") + buf = append(buf, '\t') + buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255}) + + if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" { + t.Fatal(string(buf)) + } +} + +func TestAppendEscapedText(t *testing.T) { + if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" { + t.Fatal(string(esc)) + } + if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" { + t.Fatal(string(esc)) + } + if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" { + t.Fatal(string(esc)) + } +} + +func TestAppendEscapedTextExistingBuffer(t *testing.T) { + var buf []byte + buf = 
[]byte("123\t") + if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" { + t.Fatal(string(esc)) + } + buf = []byte("123\t") + if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" { + t.Fatal(string(esc)) + } + buf = []byte("123\t") + if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" { + t.Fatal(string(esc)) + } +} + +func BenchmarkAppendEscapedText(b *testing.B) { + longString := "" + for i := 0; i < 100; i++ { + longString += "123456789\n" + } + for i := 0; i < b.N; i++ { + appendEscapedText(nil, longString) + } +} + +func BenchmarkAppendEscapedTextNoEscape(b *testing.B) { + longString := "" + for i := 0; i < 100; i++ { + longString += "1234567890" + } + for i := 0; i < b.N; i++ { + appendEscapedText(nil, longString) + } +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/error.go b/services/templeton/vendor/src/github.com/lib/pq/error.go new file mode 100644 index 000000000..b4bb44cee --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/error.go @@ -0,0 +1,508 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". 
It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": "nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": "zero_length_character_string", + "22P01": 
"floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": "srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": "windowing_error", + "42P19": "invalid_recursion", + 
"42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": "fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": "fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", 
+ // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (c *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + c.bad = true + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + *err = driver.ErrBadConn + case error: + if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + c.bad = true + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. 
+ if *err == driver.ErrBadConn { + c.bad = true + } +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/hstore/hstore.go b/services/templeton/vendor/src/github.com/lib/pq/hstore/hstore.go new file mode 100644 index 000000000..72d5abf51 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/hstore/hstore.go @@ -0,0 +1,118 @@ +package hstore + +import ( + "database/sql" + "database/sql/driver" + "strings" +) + +// A wrapper for transferring Hstore values back and forth easily. +type Hstore struct { + Map map[string]sql.NullString +} + +// escapes and quotes hstore keys/values +// s should be a sql.NullString or string +func hQuote(s interface{}) string { + var str string + switch v := s.(type) { + case sql.NullString: + if !v.Valid { + return "NULL" + } + str = v.String + case string: + str = v + default: + panic("not a string or sql.NullString") + } + + str = strings.Replace(str, "\\", "\\\\", -1) + return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"` +} + +// Scan implements the Scanner interface. +// +// Note h.Map is reallocated before the scan to clear existing values. If the +// hstore column's database value is NULL, then h.Map is set to nil instead. +func (h *Hstore) Scan(value interface{}) error { + if value == nil { + h.Map = nil + return nil + } + h.Map = make(map[string]sql.NullString) + var b byte + pair := [][]byte{{}, {}} + pi := 0 + inQuote := false + didQuote := false + sawSlash := false + bindex := 0 + for bindex, b = range value.([]byte) { + if sawSlash { + pair[pi] = append(pair[pi], b) + sawSlash = false + continue + } + + switch b { + case '\\': + sawSlash = true + continue + case '"': + inQuote = !inQuote + if !didQuote { + didQuote = true + } + continue + default: + if !inQuote { + switch b { + case ' ', '\t', '\n', '\r': + continue + case '=': + continue + case '>': + pi = 1 + didQuote = false + continue + case ',': + s := string(pair[1]) + if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { + h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} + } else { + h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} + } + pair[0] = []byte{} + pair[1] = []byte{} + pi = 0 + continue + } + } + } + pair[pi] = append(pair[pi], b) + } + if bindex > 0 { + s := string(pair[1]) + if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { + h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} + } else { + h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} + } + } + return nil +} + +// Value implements the driver Valuer interface. Note if h.Map is nil, the +// database column value will be set to NULL. 
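+//
+// For illustration only (the table and column names are hypothetical), a
+// value can be round-tripped through database/sql like so:
+//
+//	h := Hstore{Map: map[string]sql.NullString{"k": {String: "v", Valid: true}}}
+//	if _, err := db.Exec("INSERT INTO items (attrs) VALUES ($1)", h); err != nil {
+//		// handle error
+//	}
+//	err := db.QueryRow("SELECT attrs FROM items LIMIT 1").Scan(&h)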
+func (h Hstore) Value() (driver.Value, error) {
+	if h.Map == nil {
+		return nil, nil
+	}
+	parts := []string{}
+	for key, val := range h.Map {
+		thispart := hQuote(key) + "=>" + hQuote(val)
+		parts = append(parts, thispart)
+	}
+	return []byte(strings.Join(parts, ",")), nil
+}
diff --git a/services/templeton/vendor/src/github.com/lib/pq/hstore/hstore_test.go b/services/templeton/vendor/src/github.com/lib/pq/hstore/hstore_test.go
new file mode 100644
index 000000000..c9c108fc3
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/lib/pq/hstore/hstore_test.go
@@ -0,0 +1,148 @@
+package hstore
+
+import (
+	"database/sql"
+	"os"
+	"testing"
+
+	_ "github.com/lib/pq"
+)
+
+type Fatalistic interface {
+	Fatal(args ...interface{})
+}
+
+func openTestConn(t Fatalistic) *sql.DB {
+	datname := os.Getenv("PGDATABASE")
+	sslmode := os.Getenv("PGSSLMODE")
+
+	if datname == "" {
+		os.Setenv("PGDATABASE", "pqgotest")
+	}
+
+	if sslmode == "" {
+		os.Setenv("PGSSLMODE", "disable")
+	}
+
+	conn, err := sql.Open("postgres", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return conn
+}
+
+func TestHstore(t *testing.T) {
+	db := openTestConn(t)
+	defer db.Close()
+
+	// quietly create hstore if it doesn't exist
+	_, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore")
+	if err != nil {
+		t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error())
+	}
+
+	hs := Hstore{}
+
+	// test for null-valued hstores
+	err = db.QueryRow("SELECT NULL::hstore").Scan(&hs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if hs.Map != nil {
+		t.Fatalf("expected null map")
+	}
+
+	err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+	if err != nil {
+		t.Fatalf("re-query null map failed: %s", err.Error())
+	}
+	if hs.Map != nil {
+		t.Fatalf("expected null map")
+	}
+
+	// test for empty hstores
+	err = db.QueryRow("SELECT ''::hstore").Scan(&hs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if hs.Map == nil {
+		t.Fatalf("expected empty map, got null map")
+	}
+	if len(hs.Map) != 0 {
+		t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+	}
+
+	err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+	if err != nil {
+		t.Fatalf("re-query empty map failed: %s", err.Error())
+	}
+	if hs.Map == nil {
+		t.Fatalf("expected empty map, got null map")
+	}
+	if len(hs.Map) != 0 {
+		t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+	}
+
+	// a few example maps to test out
+	hsOnePair := Hstore{
+		Map: map[string]sql.NullString{
+			"key1": {"value1", true},
+		},
+	}
+
+	hsThreePairs := Hstore{
+		Map: map[string]sql.NullString{
+			"key1": {"value1", true},
+			"key2": {"value2", true},
+			"key3": {"value3", true},
+		},
+	}
+
+	hsSmorgasbord := Hstore{
+		Map: map[string]sql.NullString{
+			"nullstring":     {"NULL", true},
+			"actuallynull":   {"", false},
+			"NULL":           {"NULL string key", true},
+			"withbracket":    {"value>42", true},
+			"withequal":      {"value=42", true},
+			`"withquotes1"`:  {`this "should" be fine`, true},
+			`"withquotes"2"`: {`this "should\" also be fine`, true},
+			"embedded1":      {"value1=>x1", true},
+			"embedded2":      {`"value2"=>x2`, true},
+			"withnewlines":   {"\n\nvalue\t=>2", true},
+			"<>":             {`this, "should,\" also, => be fine`, true},
+		},
+	}
+
+	// test encoding in query params, then decoding during Scan
+	testBidirectional := func(h Hstore) {
+		err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs)
+		if err != nil {
+			t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error())
+		}
+		if hs.Map == nil {
+			t.Fatalf("expected %d-pair map, got null map", len(h.Map))
+		}
+		if len(hs.Map) != len(h.Map) {
t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map)) + } + + for key, val := range hs.Map { + otherval, found := h.Map[key] + if !found { + t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map)) + } + if otherval.Valid != val.Valid { + t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, len(h.Map)) + } + if otherval.String != val.String { + t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map)) + } + } + } + + testBidirectional(hsOnePair) + testBidirectional(hsThreePairs) + testBidirectional(hsSmorgasbord) +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/listen_example/doc.go b/services/templeton/vendor/src/github.com/lib/pq/listen_example/doc.go new file mode 100644 index 000000000..5bc99f5c1 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/listen_example/doc.go @@ -0,0 +1,102 @@ +/* + +Below you will find a self-contained Go program which uses the LISTEN / NOTIFY +mechanism to avoid polling the database while waiting for more work to arrive. + + // + // You can see the program in action by defining a function similar to + // the following: + // + // CREATE OR REPLACE FUNCTION public.get_work() + // RETURNS bigint + // LANGUAGE sql + // AS $$ + // SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END + // $$ + // ; + + package main + + import ( + "database/sql" + "fmt" + "time" + + "github.com/lib/pq" + ) + + func doWork(db *sql.DB, work int64) { + // work here + } + + func getWork(db *sql.DB) { + for { + // get work from the database here + var work sql.NullInt64 + err := db.QueryRow("SELECT get_work()").Scan(&work) + if err != nil { + fmt.Println("call to get_work() failed: ", err) + time.Sleep(10 * time.Second) + continue + } + if !work.Valid { + // no more work to do + fmt.Println("ran out of work") + return + } + + fmt.Println("starting work on ", work.Int64) + go doWork(db, work.Int64) + } + } + + func waitForNotification(l *pq.Listener) { + for { + select { + case <-l.Notify: + fmt.Println("received notification, new work available") + return + case <-time.After(90 * time.Second): + go func() { + l.Ping() + }() + // Check if there's more work available, just in case it takes + // a while for the Listener to notice connection loss and + // reconnect. + fmt.Println("received no work for 90 seconds, checking for new work") + return + } + } + } + + func main() { + var conninfo string = "" + + db, err := sql.Open("postgres", conninfo) + if err != nil { + panic(err) + } + + reportProblem := func(ev pq.ListenerEventType, err error) { + if err != nil { + fmt.Println(err.Error()) + } + } + + listener := pq.NewListener(conninfo, 10 * time.Second, time.Minute, reportProblem) + err = listener.Listen("getwork") + if err != nil { + panic(err) + } + + fmt.Println("entering main loop") + for { + // process all available work before waiting for notifications + getWork(db) + waitForNotification(listener) + } + } + + +*/ +package listen_example diff --git a/services/templeton/vendor/src/github.com/lib/pq/notify.go b/services/templeton/vendor/src/github.com/lib/pq/notify.go new file mode 100644 index 000000000..8cad57815 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/notify.go @@ -0,0 +1,766 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. 
+type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// Creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + cn, err := Open(name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: notificationChan, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. +func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. 
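+			// 'A' is the protocol's NotificationResponse message: the PID of
+			// the notifying backend, the channel name, then the payload.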
+ l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. + if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'N', 'S': + // ignore + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. + close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Send a LISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Listen(channel string) (bool, error) { + return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) +} + +// Send an UNLISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Unlisten(channel string) (bool, error) { + return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) +} + +// Send `UNLISTEN *` to the server. See ExecSimpleQuery. +func (l *ListenerConn) UnlistenAll() (bool, error) { + return l.ExecSimpleQuery("UNLISTEN *") +} + +// Ping the remote server to make sure it's alive. Non-nil error means the +// connection has failed and should be abandoned. +func (l *ListenerConn) Ping() error { + sent, err := l.ExecSimpleQuery("") + if !sent { + return err + } + if err != nil { + // shouldn't happen + panic(err) + } + return nil +} + +// Attempt to send a query on the connection. Returns an error if sending the +// query failed, and the caller should initiate closure of this connection. +// The caller must be holding senderLock (see acquireSenderLock and +// releaseSenderLock). 
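+//
+// The query is framed as a single simple-protocol Query ('Q') message; the
+// four zero bytes in the buffer below are a placeholder for the 32-bit
+// message length, which is filled in when the message is sent.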
+func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
+	defer errRecoverNoErrBadConn(&err)
+
+	// must set connection state before sending the query
+	if !l.setState(connStateExpectResponse) {
+		panic("two queries running at the same time")
+	}
+
+	// Can't use l.cn.writeBuf here because it uses the scratch buffer which
+	// might get overwritten by listenerConnLoop.
+	b := &writeBuf{
+		buf: []byte("Q\x00\x00\x00\x00"),
+		pos: 1,
+	}
+	b.string(q)
+	l.cn.send(b)
+
+	return nil
+}
+
+// Execute a "simple query" (i.e. one with no bindable parameters) on the
+// connection. The possible return values are:
+// 1) "executed" is true; the query was executed to completion on the
+// database server. If the query failed, err will be set to the error
+// returned by the database, otherwise err will be nil.
+// 2) If "executed" is false, the query could not be executed on the remote
+// server. err will be non-nil.
+//
+// After a call to ExecSimpleQuery has returned an executed=false value, the
+// connection has either been closed or will be closed shortly thereafter, and
+// all subsequently executed queries will return an error.
+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
+	if err = l.acquireSenderLock(); err != nil {
+		return false, err
+	}
+	defer l.releaseSenderLock()
+
+	err = l.sendSimpleQuery(q)
+	if err != nil {
+		// We can't know what state the protocol is in, so we need to abandon
+		// this connection.
+		l.connectionLock.Lock()
+		// Set the error pointer if it hasn't been set already; see
+		// listenerConnMain.
+		if l.err == nil {
+			l.err = err
+		}
+		l.connectionLock.Unlock()
+		l.cn.c.Close()
+		return false, err
+	}
+
+	// now we just wait for a reply.
+	for {
+		m, ok := <-l.replyChan
+		if !ok {
+			// We lost the connection to server, don't bother waiting for a
+			// response. err should have been set already.
+			l.connectionLock.Lock()
+			err := l.err
+			l.connectionLock.Unlock()
+			return false, err
+		}
+		switch m.typ {
+		case 'Z':
+			// sanity check
+			if m.err != nil {
+				panic("m.err != nil")
+			}
+			// done; err might or might not be set
+			return true, err
+
+		case 'E':
+			// sanity check
+			if m.err == nil {
+				panic("m.err == nil")
+			}
+			// server responded with an error; ReadyForQuery to follow
+			err = m.err
+
+		default:
+			return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
+		}
+	}
+}
+
+func (l *ListenerConn) Close() error {
+	l.connectionLock.Lock()
+	if l.err != nil {
+		l.connectionLock.Unlock()
+		return errListenerConnClosed
+	}
+	l.err = errListenerConnClosed
+	l.connectionLock.Unlock()
+	// We can't send anything on the connection without holding senderLock.
+	// Simply close the net.Conn to wake up everyone operating on it.
+	return l.cn.c.Close()
+}
+
+// Err() returns the reason the connection was closed. It is not safe to call
+// this function until the notification channel has been closed.
+func (l *ListenerConn) Err() error {
+	return l.err
+}
+
+var errListenerClosed = errors.New("pq: Listener has been closed")
+
+var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
+var ErrChannelNotOpen = errors.New("pq: channel is not open")
+
+type ListenerEventType int
+
+const (
+	// Emitted only when the database connection has been initially
+	// initialized. err will always be nil.
+	ListenerEventConnected ListenerEventType = iota
+
+	// Emitted after a database connection has been lost, either because of an
+	// error or because Close has been called. err will be set to the reason
+	// the database connection was lost.
+	ListenerEventDisconnected
+
+	// Emitted after a database connection has been re-established after
+	// connection loss. err will always be nil. After this event has been
+	// emitted, a nil pq.Notification is sent on the Listener.Notify channel.
+	ListenerEventReconnected
+
+	// Emitted after a connection to the database was attempted, but failed.
+	// err will be set to an error describing why the connection attempt did
+	// not succeed.
+	ListenerEventConnectionAttemptFailed
+)
+
+type EventCallbackType func(event ListenerEventType, err error)
+
+// Listener provides an interface for listening to notifications from a
+// PostgreSQL database. For general usage information, see section
+// "Notifications".
+//
+// Listener can safely be used from concurrently running goroutines.
+type Listener struct {
+	// Channel for receiving notifications from the database. In some cases a
+	// nil value will be sent. See section "Notifications" above.
+	Notify chan *Notification
+
+	name                 string
+	minReconnectInterval time.Duration
+	maxReconnectInterval time.Duration
+	eventCallback        EventCallbackType
+
+	lock                 sync.Mutex
+	isClosed             bool
+	reconnectCond        *sync.Cond
+	cn                   *ListenerConn
+	connNotificationChan <-chan *Notification
+	channels             map[string]struct{}
+}
+
+// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
+//
+// name should be set to a connection string to be used to establish the
+// database connection (see section "Connection String Parameters" above).
+//
+// minReconnectInterval controls the duration to wait before trying to
+// re-establish the database connection after connection loss. After each
+// consecutive failure this interval is doubled, until maxReconnectInterval is
+// reached. Successfully completing the connection establishment procedure
+// resets the interval back to minReconnectInterval.
+//
+// The last parameter eventCallback can be set to a function which will be
+// called by the Listener when the state of the underlying database connection
+// changes. This callback will be called by the goroutine which dispatches the
+// notifications over the Notify channel, so you should try to avoid doing
+// potentially time-consuming operations from the callback.
+func NewListener(name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+	l := &Listener{
+		name:                 name,
+		minReconnectInterval: minReconnectInterval,
+		maxReconnectInterval: maxReconnectInterval,
+		eventCallback:        eventCallback,
+
+		channels: make(map[string]struct{}),
+
+		Notify: make(chan *Notification, 32),
+	}
+	l.reconnectCond = sync.NewCond(&l.lock)
+
+	go l.listenerMain()
+
+	return l
+}
+
+// Returns the notification channel for this listener. This is the same
+// channel as Notify, and will not be recreated during the life time of the
+// Listener.
+func (l *Listener) NotificationChannel() <-chan *Notification {
+	return l.Notify
+}
+
+// Listen starts listening for notifications on a channel. Calls to this
+// function will block until an acknowledgement has been received from the
+// server. Note that Listener automatically re-establishes the connection
+// after connection loss, so this function may block indefinitely if the
+// connection can not be re-established.
+//
+// Listen will only fail in three conditions:
+// 1) The channel is already open. The returned error will be
+// ErrChannelAlreadyOpen.
+// 2) The query was executed on the remote server, but PostgreSQL returned an
+// error message in response to the query. The returned error will be a
+// pq.Error containing the information the server supplied.
+// 3) Close is called on the Listener before the request could be completed.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Listen(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// The server allows you to issue a LISTEN on a channel which is already
+	// open, but it seems useful to be able to detect this case to spot for
+	// mistakes in application logic. If the application genuinely doesn't
+	// care, it can check the exported error and ignore it.
+	_, exists := l.channels[channel]
+	if exists {
+		return ErrChannelAlreadyOpen
+	}
+
+	if l.cn != nil {
+		// If gotResponse is true but error is set, the query was executed on
+		// the remote server, but resulted in an error. This should be
+		// relatively rare, so it's fine if we just pass the error to our
+		// caller. However, if gotResponse is false, we could not complete the
+		// query on the remote server and our underlying connection is about
+		// to go away, so we only add relname to l.channels, and wait for
+		// resync() to take care of the rest.
+		gotResponse, err := l.cn.Listen(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	l.channels[channel] = struct{}{}
+	for l.cn == nil {
+		l.reconnectCond.Wait()
+		// we let go of the mutex for a while
+		if l.isClosed {
+			return errListenerClosed
+		}
+	}
+
+	return nil
+}
+
+// Unlisten removes a channel from the Listener's channel list. Returns
+// ErrChannelNotOpen if the Listener is not listening on the specified channel.
+// Returns immediately with no error if there is no connection. Note that you
+// might still get notifications for this channel even after Unlisten has
+// returned.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Unlisten(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// Similarly to LISTEN, this is not an error in Postgres, but it seems
+	// useful to distinguish from the normal conditions.
+	_, exists := l.channels[channel]
+	if !exists {
+		return ErrChannelNotOpen
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.Unlisten(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	delete(l.channels, channel)
+	return nil
+}
+
+// UnlistenAll removes all channels from the Listener's channel list. Returns
+// immediately with no error if there is no connection. Note that you might
+// still get notifications for any of the deleted channels even after
+// UnlistenAll has returned.
+func (l *Listener) UnlistenAll() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.UnlistenAll()
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
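+	// Clearing the set also means that resync() after the next reconnect
+	// will not re-issue LISTEN for any of these channels.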
+ l.channels = make(map[string]struct{}) + return nil +} + +// Ping the remote server to make sure it's alive. Non-nil return value means +// that there is no active connection. +func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func() { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). + gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for _ = range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }() + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := NewListenerConn(l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. 
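+//
+// Connection attempts back off exponentially: the sleep between attempts
+// starts at minReconnectInterval, doubles after each consecutive failure,
+// and is capped at maxReconnectInterval (e.g. 20ms, 40ms, 80ms with a 20ms
+// minimum).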
+func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(nextReconnect.Sub(time.Now())) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/notify_test.go b/services/templeton/vendor/src/github.com/lib/pq/notify_test.go new file mode 100644 index 000000000..fe8941a4e --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/notify_test.go @@ -0,0 +1,574 @@ +package pq + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +var errNilNotification = errors.New("nil notification") + +func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error { + select { + case n := <-ch: + if n == nil { + return errNilNotification + } + if n.Channel != relname || n.Extra != extra { + return fmt.Errorf("unexpected notification %v", n) + } + return nil + case <-time.After(1500 * time.Millisecond): + return fmt.Errorf("timeout") + } +} + +func expectNoNotification(t *testing.T, ch <-chan *Notification) error { + select { + case n := <-ch: + return fmt.Errorf("unexpected notification %v", n) + case <-time.After(100 * time.Millisecond): + return nil + } +} + +func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error { + select { + case e := <-eventch: + if e != et { + return fmt.Errorf("unexpected event %v", e) + } + return nil + case <-time.After(1500 * time.Millisecond): + panic("expectEvent timeout") + } +} + +func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error { + select { + case e := <-eventch: + return fmt.Errorf("unexpected event %v", e) + case <-time.After(100 * time.Millisecond): + return nil + } +} + +func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + notificationChan := make(chan *Notification) + l, err := NewListenerConn("", notificationChan) + if err != nil { + t.Fatal(err) + } + + return l, notificationChan +} + +func TestNewListenerConn(t *testing.T) { + l, _ := newTestListenerConn(t) + + defer l.Close() +} + +func TestConnListen(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = 
expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestConnUnlisten(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } + + ok, err = l.Unlisten("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, channel) + if err != nil { + t.Fatal(err) + } +} + +func TestConnUnlistenAll(t *testing.T) { + l, channel := newTestListenerConn(t) + + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + + err = expectNotification(t, channel, "notify_test", "") + if err != nil { + t.Fatal(err) + } + + ok, err = l.UnlistenAll() + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, channel) + if err != nil { + t.Fatal(err) + } +} + +func TestConnClose(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + err := l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != errListenerConnClosed { + t.Fatalf("expected errListenerConnClosed; got %v", err) + } +} + +func TestConnPing(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + err := l.Ping() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Ping() + if err != errListenerConnClosed { + t.Fatalf("expected errListenerConnClosed; got %v", err) + } +} + +// Test for deadlock where a query fails while another one is queued +func TestConnExecDeadlock(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + l.ExecSimpleQuery("SELECT pg_sleep(60)") + wg.Done() + }() + runtime.Gosched() + go func() { + l.ExecSimpleQuery("SELECT 1") + wg.Done() + }() + // give the two goroutines some time to get into position + runtime.Gosched() + // calls Close on the net.Conn; equivalent to a network failure + l.Close() + + var done int32 = 0 + go func() { + time.Sleep(10 * time.Second) + if atomic.LoadInt32(&done) != 1 { + panic("timed out") + } + }() + wg.Wait() + atomic.StoreInt32(&done, 1) +} + +// Test for ListenerConn being closed while a slow query is executing +func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) { + l, _ := newTestListenerConn(t) + defer l.Close() + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)") + if sent { + panic("expected sent=false") + } + // could be any of a number of errors + if err == nil { + panic("expected error") + } + wg.Done() + }() + // give the above goroutine some time to get into position + runtime.Gosched() + err := l.Close() + if err != nil { + t.Fatal(err) + } + var done int32 = 0 + go func() { + time.Sleep(10 * time.Second) + if atomic.LoadInt32(&done) != 1 { + panic("timed out") + } + }() + wg.Wait() + atomic.StoreInt32(&done, 1) +} + +func TestNotifyExtra(t *testing.T) { + db := openTestConn(t) + defer db.Close() + + if getServerVersion(t, db) < 90000 { + t.Skip("skipping NOTIFY payload test since the server does not 
appear to support it") + } + + l, channel := newTestListenerConn(t) + defer l.Close() + + ok, err := l.Listen("notify_test") + if !ok || err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_test, 'something'") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, channel, "notify_test", "something") + if err != nil { + t.Fatal(err) + } +} + +// create a new test listener and also set the timeouts +func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + eventch := make(chan ListenerEventType, 16) + l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t }) + err := expectEvent(t, eventch, ListenerEventConnected) + if err != nil { + t.Fatal(err) + } + return l, eventch +} + +func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) { + return newTestListenerTimeout(t, time.Hour, time.Hour) +} + +func TestListenerListen(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerUnlisten(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = l.Unlisten("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, l.Notify) + if err != nil { + t.Fatal(err) + } +} + +func TestListenerUnlistenAll(t *testing.T) { + l, _ := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = l.UnlistenAll() + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNoNotification(t, l.Notify) + if err != nil { + t.Fatal(err) + } +} + +func TestListenerFailedQuery(t *testing.T) { + l, eventch := newTestListener(t) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + // shouldn't cause a disconnect + ok, err := l.cn.ExecSimpleQuery("SELECT error") + if !ok { + t.Fatalf("could not send query to server: %v", err) + } + _, ok = err.(PGError) + if !ok { + t.Fatalf("unexpected error %v", err) + } + err = expectNoEvent(t, eventch) + if err != 
nil { + t.Fatal(err) + } + + // should still work + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerReconnect(t *testing.T) { + l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + db := openTestConn(t) + defer db.Close() + + err := l.Listen("notify_listen_test") + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } + + // kill the connection and make sure it comes back up + ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())") + if ok { + t.Fatalf("could not kill the connection: %v", err) + } + if err != io.EOF { + t.Fatalf("unexpected error %v", err) + } + err = expectEvent(t, eventch, ListenerEventDisconnected) + if err != nil { + t.Fatal(err) + } + err = expectEvent(t, eventch, ListenerEventReconnected) + if err != nil { + t.Fatal(err) + } + + // should still work + _, err = db.Exec("NOTIFY notify_listen_test") + if err != nil { + t.Fatal(err) + } + + // should get nil after Reconnected + err = expectNotification(t, l.Notify, "", "") + if err != errNilNotification { + t.Fatal(err) + } + + err = expectNotification(t, l.Notify, "notify_listen_test", "") + if err != nil { + t.Fatal(err) + } +} + +func TestListenerClose(t *testing.T) { + l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + err := l.Close() + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != errListenerClosed { + t.Fatalf("expected errListenerClosed; got %v", err) + } +} + +func TestListenerPing(t *testing.T) { + l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) + defer l.Close() + + err := l.Ping() + if err != nil { + t.Fatal(err) + } + + err = l.Close() + if err != nil { + t.Fatal(err) + } + + err = l.Ping() + if err != errListenerClosed { + t.Fatalf("expected errListenerClosed; got %v", err) + } +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/oid/doc.go b/services/templeton/vendor/src/github.com/lib/pq/oid/doc.go new file mode 100644 index 000000000..caaede248 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. +package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/services/templeton/vendor/src/github.com/lib/pq/oid/gen.go b/services/templeton/vendor/src/github.com/lib/pq/oid/gen.go new file mode 100644 index 000000000..cd4aea808 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/oid/gen.go @@ -0,0 +1,74 @@ +// +build ignore + +// Generate the table of OID values +// Run with 'go run gen.go'. 
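+//
+// The generator connects using the standard PG* environment variables,
+// queries pg_type for all rows with OID below 10000, and pipes a const
+// block (one "T_<typname> Oid = <oid>" line per row) through gofmt into
+// types.go.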
+package main + +import ( + "database/sql" + "fmt" + "log" + "os" + "os/exec" + + _ "github.com/lib/pq" +) + +func main() { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + db, err := sql.Open("postgres", "") + if err != nil { + log.Fatal(err) + } + cmd := exec.Command("gofmt") + cmd.Stderr = os.Stderr + w, err := cmd.StdinPipe() + if err != nil { + log.Fatal(err) + } + f, err := os.Create("types.go") + if err != nil { + log.Fatal(err) + } + cmd.Stdout = f + err = cmd.Start() + if err != nil { + log.Fatal(err) + } + fmt.Fprintln(w, "// generated by 'go run gen.go'; do not edit") + fmt.Fprintln(w, "\npackage oid") + fmt.Fprintln(w, "const (") + rows, err := db.Query(` + SELECT typname, oid + FROM pg_type WHERE oid < 10000 + ORDER BY oid; + `) + if err != nil { + log.Fatal(err) + } + var name string + var oid int + for rows.Next() { + err = rows.Scan(&name, &oid) + if err != nil { + log.Fatal(err) + } + fmt.Fprintf(w, "T_%s Oid = %d\n", name, oid) + } + if err = rows.Err(); err != nil { + log.Fatal(err) + } + fmt.Fprintln(w, ")") + w.Close() + cmd.Wait() +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/oid/types.go b/services/templeton/vendor/src/github.com/lib/pq/oid/types.go new file mode 100644 index 000000000..03df05a61 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/oid/types.go @@ -0,0 +1,161 @@ +// generated by 'go run gen.go'; do not edit + +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 
1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 +) diff --git a/services/templeton/vendor/src/github.com/lib/pq/ssl_test.go b/services/templeton/vendor/src/github.com/lib/pq/ssl_test.go new file mode 100644 index 000000000..932b336f5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/ssl_test.go @@ -0,0 +1,226 @@ +package pq + +// This file contains SSL tests + +import ( + _ "crypto/sha256" + "crypto/x509" + "database/sql" + "fmt" + "os" + "path/filepath" + "testing" +) + +func maybeSkipSSLTests(t *testing.T) { + // Require some special variables for testing certificates + if os.Getenv("PQSSLCERTTEST_PATH") == "" { + t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests") + } + + value := os.Getenv("PQGOSSLTESTS") + if value == "" || value == "0" { + t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests") + } else if value != "1" { + t.Fatalf("unexpected value %q for PQGOSSLTESTS", value) + } +} + +func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) { + db, err := openTestConnConninfo(conninfo) + if err != nil { + // should never fail + t.Fatal(err) + } + // Do something with the connection to see whether it's working or not. 
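+	// sql.Open does not actually dial the server, so the Begin/Rollback
+	// below forces a round trip that surfaces TLS and authentication
+	// failures.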
+ tx, err := db.Begin() + if err == nil { + return db, tx.Rollback() + } + _ = db.Close() + return nil, err +} + +func checkSSLSetup(t *testing.T, conninfo string) { + db, err := openSSLConn(t, conninfo) + if err == nil { + db.Close() + t.Fatalf("expected error with conninfo=%q", conninfo) + } +} + +// Connect over SSL and run a simple query to test the basics +func TestSSLConnection(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + db, err := openSSLConn(t, "sslmode=require user=pqgossltest") + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// Test sslmode=verify-full +func TestSSLVerifyFull(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Not OK according to the system CA + _, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok := err.(x509.UnknownAuthorityError) + if !ok { + t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err) + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + // No match on Common Name + _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok = err.(x509.HostnameError) + if !ok { + t.Fatalf("expected x509.HostnameError, got %#+v", err) + } + // OK + _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest") + if err != nil { + t.Fatal(err) + } +} + +// Test sslmode=verify-ca +func TestSSLVerifyCA(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Not OK according to the system CA + _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest") + if err == nil { + t.Fatal("expected error") + } + _, ok := err.(x509.UnknownAuthorityError) + if !ok { + t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err) + } + + rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") + rootCert := "sslrootcert=" + rootCertPath + " " + // No match on Common Name, but that's OK + _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest") + if err != nil { + t.Fatal(err) + } + // Everything OK + _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest") + if err != nil { + t.Fatal(err) + } +} + +func getCertConninfo(t *testing.T, source string) string { + var sslkey string + var sslcert string + + certpath := os.Getenv("PQSSLCERTTEST_PATH") + + switch source { + case "missingkey": + sslkey = "/tmp/filedoesnotexist" + sslcert = filepath.Join(certpath, "postgresql.crt") + case "missingcert": + sslkey = filepath.Join(certpath, "postgresql.key") + sslcert = "/tmp/filedoesnotexist" + case "certtwice": + sslkey = filepath.Join(certpath, "postgresql.crt") + sslcert = filepath.Join(certpath, "postgresql.crt") + case "valid": + sslkey = filepath.Join(certpath, "postgresql.key") + sslcert = filepath.Join(certpath, "postgresql.crt") + default: + t.Fatalf("invalid source %q", source) + } + return fmt.Sprintf("sslmode=require user=pqgosslcert sslkey=%s sslcert=%s", sslkey, sslcert) +} + +// Authenticate over SSL using client 
certificates +func TestSSLClientCertificates(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Should also fail without a valid certificate + db, err := openSSLConn(t, "sslmode=require user=pqgosslcert") + if err == nil { + db.Close() + t.Fatal("expected error") + } + pge, ok := err.(*Error) + if !ok { + t.Fatal("expected pq.Error") + } + if pge.Code.Name() != "invalid_authorization_specification" { + t.Fatalf("unexpected error code %q", pge.Code.Name()) + } + + // Should work + db, err = openSSLConn(t, getCertConninfo(t, "valid")) + if err != nil { + t.Fatal(err) + } + rows, err := db.Query("SELECT 1") + if err != nil { + t.Fatal(err) + } + rows.Close() +} + +// Test errors with ssl certificates +func TestSSLClientCertificatesMissingFiles(t *testing.T) { + maybeSkipSSLTests(t) + // Environment sanity check: should fail without SSL + checkSSLSetup(t, "sslmode=disable user=pqgossltest") + + // Key missing, should fail + _, err := openSSLConn(t, getCertConninfo(t, "missingkey")) + if err == nil { + t.Fatal("expected error") + } + // should be a PathError + _, ok := err.(*os.PathError) + if !ok { + t.Fatalf("expected PathError, got %#+v", err) + } + + // Cert missing, should fail + _, err = openSSLConn(t, getCertConninfo(t, "missingcert")) + if err == nil { + t.Fatal("expected error") + } + // should be a PathError + _, ok = err.(*os.PathError) + if !ok { + t.Fatalf("expected PathError, got %#+v", err) + } + + // Key has wrong permissions, should fail + _, err = openSSLConn(t, getCertConninfo(t, "certtwice")) + if err == nil { + t.Fatal("expected error") + } + if err != ErrSSLKeyHasWorldPermissions { + t.Fatalf("expected ErrSSLKeyHasWorldPermissions, got %#+v", err) + } +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/url.go b/services/templeton/vendor/src/github.com/lib/pq/url.go new file mode 100644 index 000000000..f4d8a7c20 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/url_test.go b/services/templeton/vendor/src/github.com/lib/pq/url_test.go new file mode 100644 index 000000000..4ff0ce034 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/url_test.go @@ -0,0 +1,66 @@ +package pq + +import ( + "testing" +) + +func TestSimpleParseURL(t *testing.T) { + expected := "host=hostname.remote" + str, err := ParseURL("postgres://hostname.remote") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected) + } +} + +func TestIPv6LoopbackParseURL(t *testing.T) { + expected := "host=::1 port=1234" + str, err := ParseURL("postgres://[::1]:1234") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected) + } +} + +func TestFullParseURL(t *testing.T) { + expected := `dbname=database host=hostname.remote password=top\ secret port=1234 user=username` + str, err := ParseURL("postgres://username:top%20secret@hostname.remote:1234/database") + if err != nil { + t.Fatal(err) + } + + if str != expected { + t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected) + } +} + +func TestInvalidProtocolParseURL(t *testing.T) { + _, err := ParseURL("http://hostname.remote") + switch err { + case nil: + t.Fatal("Expected an error from parsing invalid protocol") + default: + msg := "invalid connection protocol: http" + if err.Error() != msg { + t.Fatalf("Unexpected error message:\n+ %s\n- %s", + err.Error(), msg) + } + } +} + +func TestMinimalURL(t *testing.T) { + cs, err := ParseURL("postgres://") + if err != nil { + t.Fatal(err) + } + + if cs != "" { + t.Fatalf("expected blank connection string, got: %q", cs) + } +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/user_posix.go b/services/templeton/vendor/src/github.com/lib/pq/user_posix.go new file mode 100644 index 000000000..e937d7d08 --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/user_posix.go @@ -0,0 +1,24 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. 
+ +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/services/templeton/vendor/src/github.com/lib/pq/user_windows.go b/services/templeton/vendor/src/github.com/lib/pq/user_windows.go new file mode 100644 index 000000000..2b691267b --- /dev/null +++ b/services/templeton/vendor/src/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baserequest.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baserequest.go new file mode 100644 index 000000000..f9a9dcbff --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baserequest.go @@ -0,0 +1,141 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package elastigo + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "time" +) + +func (c *Conn) DoCommand(method string, url string, args map[string]interface{}, data interface{}) ([]byte, error) { + var response map[string]interface{} + var body []byte + var httpStatusCode int + + query, err := Escape(args) + if err != nil { + return nil, err + } + req, err := c.NewRequest(method, url, query) + if err != nil { + return body, err + } + + if data != nil { + switch v := data.(type) { + case string: + req.SetBodyString(v) + case io.Reader: + req.SetBody(v) + case []byte: + req.SetBodyBytes(v) + default: + err = req.SetBodyJson(v) + if err != nil { + return body, err + } + } + } + + // uncomment this to print out the request that hits the wire + // (requires net/http/httputil) + //reqbuf, err := httputil.DumpRequest(req.Request, true) + //log.Println(fmt.Sprintf("\n========= req:\nURL: %s\n%s", req.URL, bytes.NewBuffer(reqbuf).String())) + + // Copy request body for tracer + if c.RequestTracer != nil { + rbody := "" + if req.Body != nil { + requestBody, err := ioutil.ReadAll(req.Body) + if err != nil { + return body, err + } + + req.SetBody(bytes.NewReader(requestBody)) + rbody = string(requestBody) + } + c.RequestTracer(req.Method, req.URL.String(), rbody) + } + + httpStatusCode, body, err = req.Do(&response) + if err != nil { + return body, err + } + if httpStatusCode > 304 { + + jsonErr := json.Unmarshal(body, &response) + if jsonErr == nil { + if res_err, ok := response["error"]; ok { + status, _ := response["status"] + return body, ESError{time.Now(), fmt.Sprintf("Error [%s] Status [%v]", res_err, status), httpStatusCode} + } + } + return body, jsonErr + } + return body, nil +} + +// ESError is an error implementation that includes a time, message, and code. 
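DoCommand above is the low-level request path that every helper in this client goes through, and any HTTP status above 304 is surfaced as the ESError value defined just below. A minimal sketch of driving it directly, assuming the NewConn constructor and its Domain/Port fields from the library's connection.go, which this patch vendors elsewhere:

package main

import (
	"fmt"
	"log"

	elastigo "github.com/mattbaird/elastigo/lib"
)

func main() {
	c := elastigo.NewConn() // assumed constructor from the vendored connection.go
	c.Domain = "localhost"  // assumed fields; shown here with their usual defaults
	c.Port = "9200"

	body, err := c.DoCommand("GET", "/_cluster/health", nil, nil)
	if err != nil {
		// Status codes above 304 come back as an ESError value.
		if esErr, ok := err.(elastigo.ESError); ok {
			log.Fatalf("ES error at %v: %s (HTTP %d)", esErr.When, esErr.What, esErr.Code)
		}
		log.Fatal(err)
	}
	fmt.Printf("raw response: %s\n", body)
}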
+type ESError struct {
+	When time.Time
+	What string
+	Code int
+}
+
+func (e ESError) Error() string {
+	return fmt.Sprintf("%v: %v [%v]", e.When, e.What, e.Code)
+}
+
+// Exists allows the caller to check for the existence of a document using HEAD.
+// Note that HEAD requests appear to be broken in elasticsearch 0.19.10, which
+// currently returns nothing for them.
+func (c *Conn) Exists(index string, _type string, id string, args map[string]interface{}) (BaseResponse, error) {
+	var response map[string]interface{}
+	var body []byte
+	var url string
+	var retval BaseResponse
+	var httpStatusCode int
+
+	query, err := Escape(args)
+	if err != nil {
+		return retval, err
+	}
+
+	if len(_type) > 0 {
+		url = fmt.Sprintf("/%s/%s/%s", index, _type, id)
+	} else {
+		url = fmt.Sprintf("/%s/%s", index, id)
+	}
+	req, err := c.NewRequest("HEAD", url, query)
+	if err != nil {
+		return retval, err
+	}
+	httpStatusCode, body, err = req.Do(&response)
+	if httpStatusCode > 304 {
+		if errResp, ok := response["error"]; ok {
+			status := response["status"]
+			log.Printf("Error: %v (%v)\n", errResp, status)
+		}
+	} else {
+		// unmarshal the response into the BaseResponse
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			log.Println(jsonErr)
+		}
+	}
+	return retval, err
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baseresponse.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baseresponse.go
new file mode 100644
index 000000000..41770dfa9
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baseresponse.go
@@ -0,0 +1,146 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type BaseResponse struct {
+	Ok      bool             `json:"ok"`
+	Index   string           `json:"_index,omitempty"`
+	Type    string           `json:"_type,omitempty"`
+	Id      string           `json:"_id,omitempty"`
+	Source  *json.RawMessage `json:"_source,omitempty"` // depends on the schema you've defined
+	Version int              `json:"_version,omitempty"`
+	Found   bool             `json:"found,omitempty"`
+	Exists  bool             `json:"exists,omitempty"`
+	Created bool             `json:"created,omitempty"`
+	Matches []string         `json:"matches,omitempty"` // percolate matches
+}
+
+// StatusInt is required because /_optimize, at least, returns its status as
+// strings instead of integers.
+type StatusInt int
+
+func (self *StatusInt) UnmarshalJSON(b []byte) error {
+	s := ""
+	if json.Unmarshal(b, &s) == nil {
+		if i, err := strconv.Atoi(s); err == nil {
+			*self = StatusInt(i)
+			return nil
+		}
+	}
+	i := 0
+	err := json.Unmarshal(b, &i)
+	if err == nil {
+		*self = StatusInt(i)
+	}
+	return err
+}
+
+func (self *StatusInt) MarshalJSON() ([]byte, error) {
+	return json.Marshal(*self)
+}
+
+// StatusBool is required because /_optimize, at least, returns its status as
+// strings instead of booleans.
+type StatusBool bool + +func (self *StatusBool) UnmarshalJSON(b []byte) error { + s := "" + if json.Unmarshal(b, &s) == nil { + switch s { + case "true": + *self = StatusBool(true) + return nil + case "false": + *self = StatusBool(false) + return nil + default: + } + } + b2 := false + err := json.Unmarshal(b, &b2) + if err == nil { + *self = StatusBool(b2) + } + return err +} + +func (self *StatusBool) MarshalJSON() ([]byte, error) { + return json.Marshal(*self) +} + +type Status struct { + Total StatusInt `json:"total"` + Successful StatusInt `json:"successful"` + Failed StatusInt `json:"failed"` + Failures []Failure `json:"failures,omitempty"` +} + +type Failure struct { + Index string `json:"index"` + Shard StatusInt `json:"shard"` + Reason string `json:"reason"` +} + +func (f Failure) String() string { + return fmt.Sprintf("Failed on shard %d on index %s:\n%s", f.Shard, f.Index, f.Reason) +} + +// failures is a convenience type to allow []Failure formated easily in the +// library +type failures []Failure + +func (f failures) String() string { + message := make([]string, len(f)) + for i, failure := range f { + message[i] = failure.String() + } + return strings.Join(message, "\n") +} + +type ExtendedStatus struct { + Ok StatusBool `json:"ok"` + ShardsStatus Status `json:"_shards"` +} + +type MatchRes struct { + Index string `json:"_index"` + Id string `json:"_id"` +} + +type Match struct { + OK bool `json:"ok"` + Matches []MatchRes `json:"matches"` + Explanation *Explanation `json:"explanation,omitempty"` +} + +type Explanation struct { + Value float32 `json:"value"` + Description string `json:"description"` + Details []*Explanation `json:"details,omitempty"` +} + +func ScrollDuration(duration string) string { + scrollString := "" + if duration != "" { + scrollString = "&scroll=" + duration + } + return scrollString +} + +// http://www.elasticsearch.org/guide/reference/api/search/search-type/ diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo.go new file mode 100644 index 000000000..44ca2d80c --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo.go @@ -0,0 +1,80 @@ +package elastigo + +import ( + "errors" + "strconv" + "strings" +) + +var ErrInvalidIndexLine = errors.New("Cannot parse indexline") + +//Create an IndexInfo from the string _cat/indices would produce +//EX: health status index pri rep docs.count docs.deleted store.size pri.store.size +//green open logs-2015-06-19 2 0 135389346 0 53048922233 53048922233 +func NewCatIndexInfo(indexLine string) (catIndex *CatIndexInfo, err error) { + split := strings.Fields(indexLine) + if len(split) < 5 { + return nil, ErrInvalidIndexLine + } + catIndex = &CatIndexInfo{} + catIndex.Store = CatIndexStore{} + catIndex.Docs = CatIndexDocs{} + catIndex.Health = split[0] + catIndex.Status = split[1] + catIndex.Name = split[2] + catIndex.Shards, err = strconv.Atoi(split[3]) + if err != nil { + catIndex.Shards = 0 + } + catIndex.Replicas, err = strconv.Atoi(split[4]) + if err != nil { + catIndex.Replicas = 0 + } + if len(split) == 5 { + return catIndex, nil + } + catIndex.Docs.Count, err = strconv.ParseInt(split[5], 10, 64) + if err != nil { + catIndex.Docs.Count = 0 + } + if len(split) == 6 { + return catIndex, nil + } + catIndex.Docs.Deleted, err = strconv.ParseInt(split[6], 10, 64) + if err != nil { + catIndex.Docs.Deleted = 0 + } + if len(split) == 7 { + return catIndex, nil + } + 
catIndex.Store.Size, err = strconv.ParseInt(split[7], 10, 64) + if err != nil { + catIndex.Store.Size = 0 + } + if len(split) == 8 { + return catIndex, nil + } + catIndex.Store.PriSize, err = strconv.ParseInt(split[8], 10, 64) + if err != nil { + catIndex.Store.PriSize = 0 + } + return catIndex, nil +} + +// Pull all the index info from the connection +func (c *Conn) GetCatIndexInfo(pattern string) (catIndices []CatIndexInfo) { + catIndices = make([]CatIndexInfo, 0) + //force it to only show the fileds we know about + args := map[string]interface{}{"bytes": "b", "h": "health,status,index,pri,rep,docs.count,docs.deleted,store.size,pri.store.size"} + indices, err := c.DoCommand("GET", "/_cat/indices/"+pattern, args, nil) + if err == nil { + indexLines := strings.Split(string(indices[:]), "\n") + for _, index := range indexLines { + ci, _ := NewCatIndexInfo(index) + if nil != ci { + catIndices = append(catIndices, *ci) + } + } + } + return catIndices +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo_test.go new file mode 100644 index 000000000..d09f6cd92 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo_test.go @@ -0,0 +1,117 @@ +package elastigo + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestCatIndexInfo(t *testing.T) { + Convey("Create index line from a broken index listing", t, func() { + _, err := NewCatIndexInfo("red ") + So(err, ShouldNotBeNil) + }) + Convey("catIndex Create index line from a bad shards index listing", t, func() { + i, err := NewCatIndexInfo("green open logs-2015-06-19 2 1 135389346 20 53048922233 53048922233") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "green") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "logs-2015-06-19") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 135389346) + So(i.Docs.Deleted, ShouldEqual, 20) + So(i.Store.Size, ShouldEqual, 53048922233) + So(i.Store.PriSize, ShouldEqual, 53048922233) + }) + Convey("catIndex Create index line from a bad replicas index listing", t, func() { + i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 0 1234 3 11000 13000") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "red") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 0) + So(i.Docs.Count, ShouldEqual, 1234) + So(i.Docs.Deleted, ShouldEqual, 3) + So(i.Store.Size, ShouldEqual, 11000) + So(i.Store.PriSize, ShouldEqual, 13000) + }) + Convey("catIndex Create index line from a complete index listing", t, func() { + i, err := NewCatIndexInfo("red closed foo-2000-01-01-bar 2 1 1234 3 11000 13000") + So(err, ShouldBeNil) + So(i.Status, ShouldEqual, "closed") + So(i.Health, ShouldEqual, "red") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 1234) + So(i.Docs.Deleted, ShouldEqual, 3) + So(i.Store.Size, ShouldEqual, 11000) + So(i.Store.PriSize, ShouldEqual, 13000) + }) + Convey("catIndex Create index line from a bad docs index listing", t, func() { + i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 a 3 11000 13000") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "red") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 
2)
+		So(i.Replicas, ShouldEqual, 1)
+		So(i.Docs.Count, ShouldEqual, 0)
+		So(i.Docs.Deleted, ShouldEqual, 3)
+		So(i.Store.Size, ShouldEqual, 11000)
+		So(i.Store.PriSize, ShouldEqual, 13000)
+	})
+	Convey("catIndex Create index line from a bad deletes index listing", t, func() {
+		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234 a 11000 13000")
+		So(err, ShouldBeNil)
+		So(i.Health, ShouldEqual, "red")
+		So(i.Status, ShouldEqual, "open")
+		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
+		So(i.Shards, ShouldEqual, 2)
+		So(i.Replicas, ShouldEqual, 1)
+		So(i.Docs.Count, ShouldEqual, 1234)
+		So(i.Docs.Deleted, ShouldEqual, 0)
+		So(i.Store.Size, ShouldEqual, 11000)
+		So(i.Store.PriSize, ShouldEqual, 13000)
+	})
+	Convey("catIndex Create index line from a kinda short index listing", t, func() {
+		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234")
+		So(err, ShouldBeNil)
+		So(i.Health, ShouldEqual, "red")
+		So(i.Status, ShouldEqual, "open")
+		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
+		So(i.Shards, ShouldEqual, 2)
+		So(i.Replicas, ShouldEqual, 1)
+		So(i.Docs.Count, ShouldEqual, 1234)
+		So(i.Docs.Deleted, ShouldEqual, 0)
+		So(i.Store.Size, ShouldEqual, 0)
+		So(i.Store.PriSize, ShouldEqual, 0)
+	})
+	Convey("catIndex Create index line from a kinda sorta short index listing", t, func() {
+		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234 3")
+		So(err, ShouldBeNil)
+		So(i.Health, ShouldEqual, "red")
+		So(i.Status, ShouldEqual, "open")
+		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
+		So(i.Shards, ShouldEqual, 2)
+		So(i.Replicas, ShouldEqual, 1)
+		So(i.Docs.Count, ShouldEqual, 1234)
+		So(i.Docs.Deleted, ShouldEqual, 3)
+		So(i.Store.Size, ShouldEqual, 0)
+		So(i.Store.PriSize, ShouldEqual, 0)
+	})
+	Convey("catIndex Create index line from a short index listing", t, func() {
+		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1")
+		So(err, ShouldBeNil)
+		So(i.Health, ShouldEqual, "red")
+		So(i.Status, ShouldEqual, "open")
+		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
+		So(i.Shards, ShouldEqual, 2)
+		So(i.Replicas, ShouldEqual, 1)
+		So(i.Docs.Count, ShouldEqual, 0)
+		So(i.Docs.Deleted, ShouldEqual, 0)
+		So(i.Store.Size, ShouldEqual, 0)
+		So(i.Store.PriSize, ShouldEqual, 0)
+	})
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo.go
new file mode 100644
index 000000000..cee737748
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo.go
@@ -0,0 +1,249 @@
+package elastigo
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// newCatNodeInfo returns an instance of CatNodeInfo populated with the
+// information in the cat output indexLine, which contains the
+// specified fields. An error is returned if a field is not known.
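Before the node-level parser that follows, a short sketch of the index-line parser those tests exercise; the import alias is an assumption, since the vendored package is named elastigo:

package main

import (
	"fmt"
	"log"

	elastigo "github.com/mattbaird/elastigo/lib"
)

func main() {
	// One line of header-less _cat/indices output, in the column order
	// that GetCatIndexInfo requests.
	line := "green open logs-2015-06-19 2 0 135389346 0 53048922233 53048922233"
	ci, err := elastigo.NewCatIndexInfo(line)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ci.Name, ci.Shards, ci.Replicas) // logs-2015-06-19 2 0
	fmt.Println(ci.Docs.Count, ci.Store.Size)    // 135389346 53048922233
}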
+func newCatNodeInfo(fields []string, indexLine string) (catNode *CatNodeInfo, err error) { + + split := strings.Fields(indexLine) + catNode = &CatNodeInfo{} + + // Check the fields length compared to the number of stats + lf, ls := len(fields), len(split) + if lf > ls { + return nil, fmt.Errorf("Number of fields (%d) greater than number of stats (%d)", lf, ls) + } + + // Populate the apropriate field in CatNodeInfo + for i, field := range fields { + + switch field { + case "id", "nodeId": + catNode.Id = split[i] + case "pid", "p": + catNode.PID = split[i] + case "host", "h": + catNode.Host = split[i] + case "ip", "i": + catNode.IP = split[i] + case "port", "po": + catNode.Port = split[i] + case "version", "v": + catNode.Version = split[i] + case "build", "b": + catNode.Build = split[i] + case "jdk", "j": + catNode.JDK = split[i] + case "disk.avail", "d", "disk", "diskAvail": + catNode.DiskAvail = split[i] + case "heap.current", "hc", "heapCurrent": + catNode.HeapCur = split[i] + case "heap.percent", "hp", "heapPercent": + catNode.HeapPerc = split[i] + case "heap.max", "hm", "heapMax": + catNode.HeapMax = split[i] + case "ram.current", "rc", "ramCurrent": + catNode.RamCur = split[i] + case "ram.percent", "rp", "ramPercent": + val, err := strconv.Atoi(split[i]) + if err != nil { + return nil, err + } + catNode.RamPerc = int16(val) + case "ram.max", "rm", "ramMax": + catNode.RamMax = split[i] + case "file_desc.current", "fdc", "fileDescriptorCurrent": + catNode.FileDescCur = split[i] + case "file_desc.percent", "fdp", "fileDescriptorPercent": + catNode.FileDescPerc = split[i] + case "file_desc.max", "fdm", "fileDescriptorMax": + catNode.FileDescMax = split[i] + case "load", "l": + catNode.Load = split[i] + case "uptime", "u": + catNode.UpTime = split[i] + case "node.role", "r", "role", "dc", "nodeRole": + catNode.NodeRole = split[i] + case "master", "m": + catNode.Master = split[i] + case "name", "n": + catNode.Name = strings.Join(split[i:], " ") + case "completion.size", "cs", "completionSize": + catNode.CmpltSize = split[i] + case "fielddata.memory_size", "fm", "fielddataMemory": + val, err := strconv.Atoi(split[i]) + if err != nil { + return nil, err + } + catNode.FieldMem = val + case "fielddata.evictions", "fe", "fieldataEvictions": + val, err := strconv.Atoi(split[i]) + if err != nil { + return nil, err + } + catNode.FieldEvict = val + case "filter_cache.memory_size", "fcm", "filterCacheMemory": + val, err := strconv.Atoi(split[i]) + if err != nil { + return nil, err + } + catNode.FiltMem = val + case "filter_cache.evictions", "fce", "filterCacheEvictions": + val, err := strconv.Atoi(split[i]) + if err != nil { + return nil, err + } + catNode.FiltEvict = val + case "flush.total", "ft", "flushTotal": + val, err := strconv.Atoi(split[i]) + if err != nil { + return nil, err + } + catNode.FlushTotal = val + case "flush.total_time", "ftt", "flushTotalTime": + catNode.FlushTotalTime = split[i] + case "get.current", "gc", "getCurrent": + catNode.GetCur = split[i] + case "get.time", "gti", "getTime": + catNode.GetTime = split[i] + case "get.total", "gto", "getTotal": + catNode.GetTotal = split[i] + case "get.exists_time", "geti", "getExistsTime": + catNode.GetExistsTime = split[i] + case "get.exists_total", "geto", "getExistsTotal": + catNode.GetExistsTotal = split[i] + case "get.missing_time", "gmti", "getMissingTime": + catNode.GetMissingTime = split[i] + case "get.missing_total", "gmto", "getMissingTotal": + catNode.GetMissingTotal = split[i] + case "id_cache.memory_size", "im", 
"idCacheMemory": + val, err := strconv.Atoi(split[i]) + if err != nil { + return nil, err + } + catNode.IDCacheMemory = val + case "indexing.delete_current", "idc", "indexingDeleteCurrent": + catNode.IdxDelCur = split[i] + case "indexing.delete_time", "idti", "indexingDeleteime": + catNode.IdxDelTime = split[i] + case "indexing.delete_total", "idto", "indexingDeleteTotal": + catNode.IdxDelTotal = split[i] + case "indexing.index_current", "iic", "indexingIndexCurrent": + catNode.IdxIdxCur = split[i] + case "indexing.index_time", "iiti", "indexingIndexTime": + catNode.IdxIdxTime = split[i] + case "indexing.index_total", "iito", "indexingIndexTotal": + catNode.IdxIdxTotal = split[i] + case "merges.current", "mc", "mergesCurrent": + catNode.MergCur = split[i] + case "merges.current_docs", "mcd", "mergesCurrentDocs": + catNode.MergCurDocs = split[i] + case "merges.current_size", "mcs", "mergesCurrentSize": + catNode.MergCurSize = split[i] + case "merges.total", "mt", "mergesTotal": + catNode.MergTotal = split[i] + case "merges.total_docs", "mtd", "mergesTotalDocs": + catNode.MergTotalDocs = split[i] + case "merges.total_size", "mts", "mergesTotalSize": + catNode.MergTotalSize = split[i] + case "merges.total_time", "mtt", "mergesTotalTime": + catNode.MergTotalTime = split[i] + case "percolate.current", "pc", "percolateCurrent": + catNode.PercCur = split[i] + case "percolate.memory_size", "pm", "percolateMemory": + catNode.PercMem = split[i] + case "percolate.queries", "pq", "percolateQueries": + catNode.PercQueries = split[i] + case "percolate.time", "pti", "percolateTime": + catNode.PercTime = split[i] + case "percolate.total", "pto", "percolateTotal": + catNode.PercTotal = split[i] + case "refesh.total", "rto", "refreshTotal": + catNode.RefreshTotal = split[i] + case "refresh.time", "rti", "refreshTime": + catNode.RefreshTime = split[i] + case "search.fetch_current", "sfc", "searchFetchCurrent": + catNode.SearchFetchCur = split[i] + case "search.fetch_time", "sfti", "searchFetchTime": + catNode.SearchFetchTime = split[i] + case "search.fetch_total", "sfto", "searchFetchTotal": + catNode.SearchFetchTotal = split[i] + case "search.open_contexts", "so", "searchOpenContexts": + catNode.SearchOpenContexts = split[i] + case "search.query_current", "sqc", "searchQueryCurrent": + catNode.SearchQueryCur = split[i] + case "search.query_time", "sqti", "searchQueryTime": + catNode.SearchQueryTime = split[i] + case "search.query_total", "sqto", "searchQueryTotal": + catNode.SearchQueryTotal = split[i] + case "segments.count", "sc", "segmentsCount": + catNode.SegCount = split[i] + case "segments.memory", "sm", "segmentsMemory": + catNode.SegMem = split[i] + case "segments.index_writer_memory", "siwm", "segmentsIndexWriterMemory": + catNode.SegIdxWriterMem = split[i] + case "segments.index_writer_max_memory", "siwmx", "segmentsIndexWriterMaxMemory": + catNode.SegIdxWriterMax = split[i] + case "segments.version_map_memory", "svmm", "segmentsVersionMapMemory": + catNode.SegVerMapMem = split[i] + default: + return nil, fmt.Errorf("Invalid cat nodes field: %s", field) + } + } + + return catNode, nil +} + +// GetCatNodeInfo issues an elasticsearch cat nodes request with the specified +// fields and returns a list of CatNodeInfos, one for each node, whose requested +// members are populated with statistics. If fields is nil or empty, the default +// cat output is used. 
+// NOTE: if you include the name field, make sure it is the last field in the +// list, because name values can contain spaces which screw up the parsing +func (c *Conn) GetCatNodeInfo(fields []string) (catNodes []CatNodeInfo, err error) { + + catNodes = make([]CatNodeInfo, 0) + + // If no fields have been specified, use the "default" arrangement + if len(fields) < 1 { + fields = []string{"host", "ip", "heap.percent", "ram.percent", "load", + "node.role", "master", "name"} + } + + // Issue a request for stats on the requested fields + args := map[string]interface{}{ + "bytes": "b", + "h": strings.Join(fields, ","), + } + indices, err := c.DoCommand("GET", "/_cat/nodes/", args, nil) + if err != nil { + return catNodes, err + } + + // Create a CatIndexInfo for each line in the response + indexLines := strings.Split(string(indices[:]), "\n") + for _, index := range indexLines { + + // Ignore empty output lines + if len(index) < 1 { + continue + } + + // Create a CatNodeInfo and append it to the result + info, err := newCatNodeInfo(fields, index) + if info != nil { + catNodes = append(catNodes, *info) + } else if err != nil { + return catNodes, err + } + } + return catNodes, nil +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go new file mode 100644 index 000000000..441de2442 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go @@ -0,0 +1,58 @@ +package elastigo + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestCatNode(t *testing.T) { + + c := NewTestConn() + + Convey("Basic cat nodes", t, func() { + + fields := []string{"fm", "fe", "fcm", "fce", "ft", "ftt", "im", "rp", "n"} + catNodes, err := c.GetCatNodeInfo(fields) + + So(err, ShouldBeNil) + So(catNodes, ShouldNotBeNil) + So(len(catNodes), ShouldBeGreaterThan, 0) + + for _, catNode := range catNodes { + So(catNode.FieldMem, ShouldNotBeEmpty) + So(catNode.FiltMem, ShouldNotBeEmpty) + So(catNode.IDCacheMemory, ShouldNotBeEmpty) + So(catNode.RamPerc, ShouldNotBeEmpty) + So(catNode.Name, ShouldNotBeEmpty) + } + }) + + Convey("Cat nodes with default arguments", t, func() { + + fields := []string{} + catNodes, err := c.GetCatNodeInfo(fields) + + So(err, ShouldBeNil) + So(catNodes, ShouldNotBeNil) + So(len(catNodes), ShouldBeGreaterThan, 0) + + for _, catNode := range catNodes { + So(catNode.Host, ShouldNotBeEmpty) + So(catNode.IP, ShouldNotBeEmpty) + So(catNode.NodeRole, ShouldNotBeEmpty) + So(catNode.Name, ShouldNotBeEmpty) + } + }) + + Convey("Invalid field error behavior", t, func() { + + fields := []string{"fm", "bogus"} + catNodes, err := c.GetCatNodeInfo(fields) + + So(err, ShouldNotBeNil) + + for _, catNode := range catNodes { + So(catNode.FieldMem, ShouldNotBeEmpty) + } + }) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catresponses.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catresponses.go new file mode 100644 index 000000000..17129549f --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catresponses.go @@ -0,0 +1,105 @@ +package elastigo + +type CatIndexInfo struct { + Health string + Status string + Name string + Shards int + Replicas int + Docs CatIndexDocs + Store CatIndexStore +} + +type CatIndexDocs struct { + Count int64 + Deleted int64 +} + +type CatIndexStore struct { + Size int64 + PriSize int64 +} + +type 
CatShardInfo struct { + IndexName string + Shard int + Primary string + State string + Docs int64 + Store int64 + NodeIP string + NodeName string +} + +type CatNodeInfo struct { + Id string + PID string + Host string + IP string + Port string + Version string + Build string + JDK string + DiskAvail string + HeapCur string + HeapPerc string + HeapMax string + RamCur string + RamPerc int16 + RamMax string + FileDescCur string + FileDescPerc string + FileDescMax string + Load string + UpTime string + NodeRole string + Master string + Name string + CmpltSize string + FieldMem int + FieldEvict int + FiltMem int + FiltEvict int + FlushTotal int + FlushTotalTime string + GetCur string + GetTime string + GetTotal string + GetExistsTime string + GetExistsTotal string + GetMissingTime string + GetMissingTotal string + IDCacheMemory int + IdxDelCur string + IdxDelTime string + IdxDelTotal string + IdxIdxCur string + IdxIdxTime string + IdxIdxTotal string + MergCur string + MergCurDocs string + MergCurSize string + MergTotal string + MergTotalDocs string + MergTotalSize string + MergTotalTime string + PercCur string + PercMem string + PercQueries string + PercTime string + PercTotal string + RefreshTotal string + RefreshTime string + SearchFetchCur string + SearchFetchTime string + SearchFetchTotal string + SearchOpenContexts string + SearchQueryCur string + SearchQueryTime string + SearchQueryTotal string + SegCount string + SegMem string + SegIdxWriterMem string + SegIdxWriterMax string + SegVerMapMem string +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo.go new file mode 100644 index 000000000..c93366b94 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo.go @@ -0,0 +1,106 @@ +package elastigo + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +type CatShards []CatShardInfo + +// Stringify the shards +func (s *CatShards) String() string { + var buffer bytes.Buffer + + if s != nil { + for _, cs := range *s { + buffer.WriteString(fmt.Sprintf("%v\n", cs)) + } + } + return buffer.String() +} + +var ErrInvalidShardLine = errors.New("Cannot parse shardline") + +// Create a CatShard from a line of the raw output of a _cat/shards +func NewCatShardInfo(rawCat string) (catshard *CatShardInfo, err error) { + + split := strings.Fields(rawCat) + if len(split) < 4 { + return nil, ErrInvalidShardLine + } + catshard = &CatShardInfo{} + catshard.IndexName = split[0] + catshard.Shard, err = strconv.Atoi(split[1]) + if err != nil { + catshard.Shard = -1 + } + catshard.Primary = split[2] + catshard.State = split[3] + if len(split) == 4 { + return catshard, nil + } + + catshard.Docs, err = strconv.ParseInt(split[4], 10, 64) + if err != nil { + catshard.Docs = 0 + } + if len(split) == 5 { + return catshard, nil + } + catshard.Store, err = strconv.ParseInt(split[5], 10, 64) + if err != nil { + catshard.Store = 0 + } + if len(split) == 6 { + return catshard, nil + } + catshard.NodeIP = split[6] + if len(split) == 7 { + return catshard, nil + } + catshard.NodeName = split[7] + if len(split) > 8 { + loop: + for i, moreName := range split { + if i > 7 { + if moreName == "->" { + break loop + } + catshard.NodeName += " " + catshard.NodeName += moreName + } + } + } + + return catshard, nil +} + +// Print shard info +func (s *CatShardInfo) String() string { + if s == nil { + return ":::::::" + } + return 
fmt.Sprintf("%v:%v:%v:%v:%v:%v:%v:%v", s.IndexName, s.Shard, s.Primary, + s.State, s.Docs, s.Store, s.NodeIP, s.NodeName) +} + +// Get all the shards, even the bad ones +func (c *Conn) GetCatShards() (shards CatShards) { + shards = make(CatShards, 0) + //force it to only respond with the columns we know about and in a forced order + args := map[string]interface{}{"bytes": "b", "h": "index,shard,prirep,state,docs,store,ip,node"} + s, err := c.DoCommand("GET", "/_cat/shards", args, nil) + if err == nil { + catShardLines := strings.Split(string(s[:]), "\n") + for _, shardLine := range catShardLines { + shard, _ := NewCatShardInfo(shardLine) + if nil != shard { + shards = append(shards, *shard) + } + } + } + return shards +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo_test.go new file mode 100644 index 000000000..dd6aaaa4f --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo_test.go @@ -0,0 +1,85 @@ +package elastigo + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestCatShardInfo(t *testing.T) { + Convey("Create cat shard from started shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1 Ultra Man") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") + So(c.Primary, ShouldEqual, "p") + So(c.State, ShouldEqual, "STARTED") + So(c.Docs, ShouldEqual, 1234) + So(c.Store, ShouldEqual, 121) + So(c.NodeIP, ShouldEqual, "127.0.0.1") + So(c.NodeName, ShouldEqual, "Ultra Man") + + }) + Convey("Create cat shard from realocating shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p RELOCATING 1234 121 127.0.0.1 Ultra Man -> 10.0.0.1 Super Man") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") + So(c.Primary, ShouldEqual, "p") + So(c.State, ShouldEqual, "RELOCATING") + So(c.Docs, ShouldEqual, 1234) + So(c.Store, ShouldEqual, 121) + So(c.NodeIP, ShouldEqual, "127.0.0.1") + So(c.NodeName, ShouldEqual, "Ultra Man") + }) + Convey("Create cat shard from unallocated shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p UNASSIGNED") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") + So(c.Primary, ShouldEqual, "p") + So(c.State, ShouldEqual, "UNASSIGNED") + So(c.Docs, ShouldEqual, 0) + So(c.Store, ShouldEqual, 0) + So(c.NodeIP, ShouldEqual, "") + So(c.NodeName, ShouldEqual, "") + }) + Convey("Create cat shard from invalid shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p") + So(err, ShouldEqual, ErrInvalidShardLine) + So(c, ShouldBeNil) + }) + Convey("Create cat shard from garbled shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar a p STARTED abc 121 127.0.0.1 Ultra Man") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + So(c.Shard, ShouldEqual, -1) + So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") + So(c.Primary, ShouldEqual, "p") + So(c.State, ShouldEqual, "STARTED") + So(c.Docs, ShouldEqual, 0) + So(c.Store, ShouldEqual, 121) + So(c.NodeIP, ShouldEqual, "127.0.0.1") + So(c.NodeName, ShouldEqual, "Ultra Man") + }) + Convey("Print cat shard from started shard", t, func() { + c, _ := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1 Ultra Man") + s := c.String() + So(s, ShouldContainSubstring, "foo-2000-01-01-bar:") + So(s, 
ShouldContainSubstring, ":Ultra Man") + c = nil + s = c.String() + So(s, ShouldEqual, ":::::::") + }) + Convey("Print cat shard from short shard", t, func() { + c, _ := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234") + s := c.String() + So(s, ShouldContainSubstring, "foo-2000-01-01-bar:0:p:STARTED:1234") + c, _ = NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121") + s = c.String() + So(s, ShouldContainSubstring, "oo-2000-01-01-bar:0:p:STARTED:1234:121") + c, _ = NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1") + s = c.String() + So(s, ShouldContainSubstring, "oo-2000-01-01-bar:0:p:STARTED:1234:121:127.0.0.1") + }) + +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealth.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealth.go new file mode 100644 index 000000000..ee06210ba --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealth.go @@ -0,0 +1,128 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package elastigo + +import ( + "encoding/json" + "fmt" + "strings" +) + +// The cluster health API allows to get a very simple status on the health of the cluster. +// see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-health.html +// TODO: implement wait_for_status, timeout, wait_for_relocating_shards, wait_for_nodes +// TODO: implement level (Can be one of cluster, indices or shards. Controls the details level of the health +// information returned. Defaults to cluster.) 
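A minimal usage sketch for the Health wrapper defined just below, reusing the assumed NewConn constructor from the earlier sketches:

package main

import (
	"fmt"
	"log"

	elastigo "github.com/mattbaird/elastigo/lib"
)

func main() {
	c := elastigo.NewConn() // assumed constructor, as in the earlier sketches

	// With no arguments the whole cluster is checked; pass index names
	// to scope the request to /_cluster/health/<indices>.
	health, err := c.Health("logs-2015-06-19")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(health.ClusterName, health.Status, health.NumberOfNodes)
}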
+func (c *Conn) Health(indices ...string) (ClusterHealthResponse, error) {
+	var url string
+	var retval ClusterHealthResponse
+	if len(indices) > 0 {
+		url = fmt.Sprintf("/_cluster/health/%s", strings.Join(indices, ","))
+	} else {
+		url = "/_cluster/health"
+	}
+	body, err := c.DoCommand("GET", url, nil, nil)
+	if err != nil {
+		return retval, err
+	}
+	// unmarshal the response into the health struct
+	jsonErr := json.Unmarshal(body, &retval)
+	if jsonErr != nil {
+		return retval, jsonErr
+	}
+	return retval, nil
+}
+
+func (c *Conn) WaitForStatus(status string, timeout int, indices ...string) (ClusterHealthResponse, error) {
+	var url string
+	var retval ClusterHealthResponse
+	if len(indices) > 0 {
+		url = fmt.Sprintf("/_cluster/health/%s", strings.Join(indices, ","))
+	} else {
+		url = "/_cluster/health"
+	}
+
+	body, err := c.DoCommand("GET", url, map[string]interface{}{
+		"wait_for_status": status,
+		"timeout":         timeout,
+	}, nil)
+
+	if err != nil {
+		return retval, err
+	}
+
+	jsonErr := json.Unmarshal(body, &retval)
+	if jsonErr != nil {
+		return retval, jsonErr
+	}
+	return retval, nil
+}
+
+type ClusterStateFilter struct {
+	FilterNodes        bool
+	FilterRoutingTable bool
+	FilterMetadata     bool
+	FilterBlocks       bool
+	FilterIndices      []string
+}
+
+func (f ClusterStateFilter) Parameterize() []string {
+	var parts []string
+
+	if f.FilterNodes {
+		parts = append(parts, "filter_nodes=true")
+	}
+
+	if f.FilterRoutingTable {
+		parts = append(parts, "filter_routing_table=true")
+	}
+
+	if f.FilterMetadata {
+		parts = append(parts, "filter_metadata=true")
+	}
+
+	if f.FilterBlocks {
+		parts = append(parts, "filter_blocks=true")
+	}
+
+	if len(f.FilterIndices) > 0 {
+		parts = append(parts, "filter_indices="+strings.Join(f.FilterIndices, ","))
+	}
+
+	return parts
+}
+
+func (c *Conn) ClusterState(filter ClusterStateFilter) (ClusterStateResponse, error) {
+	var parameters []string
+	var url string
+	var retval ClusterStateResponse
+
+	parameters = filter.Parameterize()
+
+	url = fmt.Sprintf("/_cluster/state?%s", strings.Join(parameters, "&"))
+
+	body, err := c.DoCommand("GET", url, nil, nil)
+	if err != nil {
+		return retval, err
+	}
+	// unmarshal the response into the state struct
+	jsonErr := json.Unmarshal(body, &retval)
+	if jsonErr != nil {
+		return retval, jsonErr
+	}
+	return retval, nil
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealthresponses.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealthresponses.go
new file mode 100644
index 000000000..80a53f5e0
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealthresponses.go
@@ -0,0 +1,45 @@
+package elastigo
+
+type ClusterHealthResponse struct {
+	ClusterName         string `json:"cluster_name"`
+	Status              string `json:"status"`
+	TimedOut            bool   `json:"timed_out"`
+	NumberOfNodes       int    `json:"number_of_nodes"`
+	NumberOfDataNodes   int    `json:"number_of_data_nodes"`
+	ActivePrimaryShards int    `json:"active_primary_shards"`
+	ActiveShards        int    `json:"active_shards"`
+	RelocatingShards    int    `json:"relocating_shards"`
+	InitializingShards  int    `json:"initializing_shards"`
+	UnassignedShards    int    `json:"unassigned_shards"`
+}
+
+type ClusterStateResponse struct {
+	ClusterName string                              `json:"cluster_name"`
+	MasterNode  string                              `json:"master_node"`
+	Nodes       map[string]ClusterStateNodeResponse `json:"nodes"`
+	Metadata    ClusterStateMetadataResponse        `json:"metadata"`
+	// TODO: Routing 
Table + // TODO: Routing Nodes + // TODO: Allocations + +} + +type ClusterStateNodeResponse struct { + Name string `json:"name"` + TransportAddress string `json:"transport_address"` + // TODO: Attributes +} + +type ClusterStateMetadataResponse struct { + // TODO: templates + Indices map[string]ClusterStateIndiceResponse `json:"indices"` +} + +type ClusterStateIndiceResponse struct { + State string `json:"state"` +} + +type ClusterStateRoutingTableResponse struct { + // TODO: unassigned + // +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodeshotthreads.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodeshotthreads.go new file mode 100644 index 000000000..e8227651a --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodeshotthreads.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo.go new file mode 100644 index 000000000..767c6d35e --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo.go @@ -0,0 +1,184 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package elastigo + +import ( + "encoding/json" + "fmt" + "strings" +) + +// The cluster nodes info API allows to retrieve one or more (or all) of the cluster nodes information. 
+// information can be one of jvm, process +func (c *Conn) AllNodesInfo() (NodeInfo, error) { + return c.NodesInfo([]string{"_all"}, "_all") +} + +func (c *Conn) NodesInfo(information []string, nodes ...string) (NodeInfo, error) { + var url string + var retval NodeInfo + url = fmt.Sprintf("/_nodes/%s/%s", strings.Join(nodes, ","), strings.Join(information, ",")) + body, err := c.DoCommand("GET", url, nil, nil) + if err != nil { + return retval, err + } + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + return retval, err +} + +type NodeInfo struct { + ClusterName string `json:"cluster_name"` + Nodes map[string]Node `json:"nodes"` // node name is random string +} + +type Node struct { + Name string `json:"name,omitempty"` + TransportAddress string `json:"transport_address,omitempty"` + Host string `json:"host,omitempty"` + Ip string `json:"ip,omitempty"` + Version string `json:"version,omitempty"` + Build string `json:"build,omitempty"` + Hostname string `json:"hostname,omitempty"` + HttpAddress string `json:"http_address,omitempty"` + Settings *Settings `json:"settings,omitempty"` + OS *OS `json:"os,omitempty"` + Process *Process `json:"process,omitempty"` + JVM *JVM `json:"jvm,omitempty"` + ThreadPool *ThreadPool `json:"thread_pool,omitempty"` + Network *Network `json:"network,omitempty"` + Transport *Transport `json:"transport,omitempty"` + Http *Http `json:"http,omitempty"` + Plugins []*Plugin `json:"plugins,omitempty"` +} + +type Settings struct { + Path *Path `json:"path,omitempty"` + Foreground string `json:"foreground,omitempty"` + Name string `json:"name,omitempty"` +} + +type Path struct { + Logs string `json:"logs,omitempty"` + home string `json:"home,omitempty"` +} + +type Cluster struct { + Name string `json:"name"` +} + +type OS struct { + RefreshInterval int `json:"refresh_interval,omitempty"` + AvailableProcessors int `json:"available_processors,omitempty"` + CPU *CPU `json:"cpu,omitempty"` +} + +type CPU struct { + Vendor string `json:"vendor,omitempty"` + Model string `json:"model,omitempty"` + Mhz int `json:"mhz,omitempty"` + TotalCores int `json:"total_cores,omitempty"` + TotalSockets int `json:"total_sockets,omitempty"` + CoresPerSocket int `json:"cores_per_socket,omitempty"` + CacheSizeInBytes int `json:"cache_size_in_bytes,omitempty"` +} + +type MEM struct { + TotalInBytes int `json:"total_in_bytes,omitempty"` +} + +type SWAP struct { + TotalInBytes int `json:"total_in_bytes,omitempty"` +} + +type Process struct { + RefreshInterval int `json:"refresh_interval,omitempty"` + Id int `json:"id,omitempty"` + MaxFileDescriptors int `json:"max_file_descriptors,omitempty"` + Mlockall bool `json:"mlockall,omitempty"` +} + +type JVM struct { + Pid int `json:"pid,omitempty"` + Version string `json:"version,omitempty"` + VMName string `json:"vm_name,omitempty"` + VMVersion string `json:"vm_version,omitempty"` + VMVendor string `json:"vm_vendor,omitempty"` + StartTime int `json:"start_time,omitempty"` + Mem *JvmMem `json:"mem,omitempty"` + GcCollectors []string `json:"gc_collectors,omitempty"` + MemoryPools []string `json:"memory_pools,omitempty"` +} + +type JvmMem struct { + HeapInitInBytes int `json:"heap_init_in_bytes,omitempty"` + HeapMaxInBytes int `json:"heap_max_in_bytes,omitempty"` + NonHeapInitInBytes int `json:"non_heap_init_in_bytes,omitempty"` + NonHeapMaxInBytes int `json:"non_heap_max_in_bytes,omitempty"` + DirectMaxInBytes int `json:"direct_max_in_bytes,omitempty"` +} + +type ThreadPool struct { + 
Generic *ThreadPoolConfig `json:"generic,omitempty"` + Index *ThreadPoolConfig `json:"index,omitempty"` + Get *ThreadPoolConfig `json:"get,omitempty"` + Snapshot *ThreadPoolConfig `json:"snapshot,omitempty"` + Merge *ThreadPoolConfig `json:"merge,omitempty"` + Suggest *ThreadPoolConfig `json:"suggest,omitempty"` + Bulk *ThreadPoolConfig `json:"bulk,omitempty"` + Optimize *ThreadPoolConfig `json:"optimize,omitempty"` + Warmer *ThreadPoolConfig `json:"warmer,omitempty"` + Flush *ThreadPoolConfig `json:"flush,omitempty"` + Search *ThreadPoolConfig `json:"search,omitempty"` + Percolate *ThreadPoolConfig `json:"percolate,omitempty"` + Management *ThreadPoolConfig `json:"management,omitempty"` + Refresh *ThreadPoolConfig `json:"refresh,omitempty"` +} + +type ThreadPoolConfig struct { + Type string `json:"type,omitempty"` + Min int `json:"min,omitempty"` + Max int `json:"max,omitempty"` + QueueSize interface{} `json:"queue_size,omitempty"` // Either string or -1 + KeepAlive string `json:"keep_alive,omitempty"` +} + +type Network struct { + RefreshInterval int `json:"refresh_interval,omitempty"` + PrimaryInterface *Interface `json:"primary_interface,omitempty"` +} + +type Interface struct { + Address string `json:"address,omitempty"` + Name string `json:"name,omitempty"` + MacAddress string `json:"mac_address,omitempty"` +} + +type Transport struct { + BoundAddress string `json:"bound_address,omitempty"` + PublishAddress string `json:"publish_address,omitempty"` +} + +type Http struct { + BoundAddress string `json:"bound_address,omitempty"` + PublishAddress string `json:"publish_address,omitempty"` +} + +type Plugin struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Site bool `json:"site,omitempty"` + Jvm bool `json:"jvm,omitempty"` + Url string `json:"url,omitempty"` +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go new file mode 100644 index 000000000..5ee6b5d7b --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go @@ -0,0 +1,37 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "fmt" + "github.com/bmizerany/assert" + "testing" +) + +func TestGetAll(t *testing.T) { + InitTests(true) + c := NewTestConn() + nodesInfo, err := c.AllNodesInfo() + assert.T(t, err == nil, fmt.Sprintf("should not have gotten error, received: %v", err)) + assert.T(t, nodesInfo.ClusterName != "", fmt.Sprintf("clustername should have been not empty. 
received: %q", nodesInfo.ClusterName)) + for _, node := range nodesInfo.Nodes { + assert.T(t, node.Settings != nil, fmt.Sprintf("Settings should not have been null")) + assert.T(t, node.OS != nil, fmt.Sprintf("OS should not have been null")) + assert.T(t, node.Process != nil, fmt.Sprintf("Process should not have been null")) + assert.T(t, node.JVM != nil, fmt.Sprintf("JVM should not have been null")) + assert.T(t, node.ThreadPool != nil, fmt.Sprintf("ThreadPool should not have been null")) + assert.T(t, node.Network != nil, fmt.Sprintf("Network should not have been null")) + assert.T(t, node.Transport != nil, fmt.Sprintf("Transport should not have been null")) + assert.T(t, node.Http != nil, fmt.Sprintf("Http should not have been null")) + assert.T(t, node.Plugins != nil, fmt.Sprintf("Plugins should not have been null")) + } +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesshutdown.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesshutdown.go new file mode 100644 index 000000000..dacb47e1c --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesshutdown.go @@ -0,0 +1,37 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "fmt" + "net/url" + "strconv" + "strings" +) + +// NodesShutdown allows the caller to shutdown between one and all nodes in the cluster +// delay is a integer representing number of seconds +// passing "" or "_all" for the nodes parameter will shut down all nodes +// see http://www.elasticsearch.org/guide/reference/api/admin-cluster-nodes-shutdown/ +func (c *Conn) NodesShutdown(delay int, nodes ...string) error { + shutdownUrl := fmt.Sprintf("/_cluster/nodes/%s/_shutdown", strings.Join(nodes, ",")) + if delay > 0 { + var values url.Values = url.Values{} + values.Add("delay", strconv.Itoa(delay)) + shutdownUrl += "?" + values.Encode() + } + _, err := c.DoCommand("POST", shutdownUrl, nil, nil) + if err != nil { + return err + } + return nil +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesstats.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesstats.go new file mode 100644 index 000000000..f4bf12bb0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesstats.go @@ -0,0 +1,31 @@ +// Copyright 2015 Niels Freier +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
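Ahead of the stats endpoint that follows, a short sketch of the nodes-info API from the preceding hunk, under the same import-path and constructor assumptions as before:

package main

import (
	"fmt"
	"log"

	elastigo "github.com/mattbaird/elastigo/lib"
)

func main() {
	c := elastigo.NewConn() // assumed constructor, as in the earlier sketches

	// AllNodesInfo is shorthand for NodesInfo([]string{"_all"}, "_all").
	info, err := c.AllNodesInfo()
	if err != nil {
		log.Fatal(err)
	}
	for id, node := range info.Nodes {
		// The sub-structs are pointers and may be nil when a block
		// was not requested or not reported by the node.
		if node.JVM != nil {
			fmt.Printf("%s: %s (JVM %s)\n", id, node.Name, node.JVM.Version)
		}
	}
}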
+
+package elastigo
+
+import (
+	"encoding/json"
+)
+
+func (c *Conn) NodesStats() (NodeStatsResponse, error) {
+	var retval NodeStatsResponse
+
+	body, err := c.DoCommand("GET", "/_nodes/stats", nil, nil)
+	if err != nil {
+		return retval, err
+	}
+	// unmarshal the response into the stats struct
+	jsonErr := json.Unmarshal(body, &retval)
+	if jsonErr != nil {
+		return retval, jsonErr
+	}
+	return retval, nil
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterreroute.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterreroute.go
new file mode 100644
index 000000000..00c6aa942
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterreroute.go
@@ -0,0 +1,81 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+)
+
+// The cluster reroute API allows explicit control over the allocation of
+// individual shards in the cluster.
+// see http://www.elasticsearch.org/guide/reference/api/admin-cluster-reroute/
+func (c *Conn) Reroute(dryRun bool, commands Commands) (ClusterHealthResponse, error) {
+	var url string
+	var retval ClusterHealthResponse
+
+	if len(commands.Commands) > 0 {
+		url = fmt.Sprintf("/_cluster/reroute%s", dryRunOption(dryRun))
+	} else {
+		return retval, errors.New("must pass at least one command")
+	}
+	m := map[string]interface{}{"commands": commands.Commands}
+	body, err := c.DoCommand("POST", url, m, nil)
+	if err != nil {
+		return retval, err
+	}
+	// unmarshal the response into the health struct
+	jsonErr := json.Unmarshal(body, &retval)
+	if jsonErr != nil {
+		return retval, jsonErr
+	}
+	return retval, nil
+}
+
+func dryRunOption(isDryRun bool) string {
+	if isDryRun {
+		return "?dry_run"
+	}
+	return ""
+}
+
+// supported commands are
+// move (index, shard, from_node, to_node)
+// cancel (index, shard, node, allow_primary)
+// allocate (index, shard, node, allow_primary)
+
+type Commands struct {
+	Commands []interface{} `json:"commands"`
+}
+
+type MoveCommand struct {
+	Index    string `json:"index"`
+	Shard    string `json:"shard"`
+	FromNode string `json:"from_node"`
+	ToNode   string `json:"to_node"`
+}
+
+type CancelCommand struct {
+	Index        string `json:"index"`
+	Shard        string `json:"shard"`
+	Node         string `json:"node"`
+	AllowPrimary bool   `json:"allow_primary,omitempty"`
+}
+
+type AllocateCommand struct {
+	Index        string `json:"index"`
+	Shard        string `json:"shard"`
+	Node         string `json:"node"`
+	AllowPrimary bool   `json:"allow_primary,omitempty"`
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstate.go
new file mode 100644
index 000000000..d0e2a76eb
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstate.go
@@ -0,0 +1,38 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+)
+
+// UpdateSetting gets the comprehensive state information for the whole cluster.
+// (Despite its name, this is a read-only GET of /_cluster/state; the cluster
+// settings update call lives in clusterupdatesettings.go. The filter_indices
+// parameter is currently unused.)
+// see http://www.elasticsearch.org/guide/reference/api/admin-cluster-state/
+func (c *Conn) UpdateSetting(args map[string]interface{}, filter_indices ...string) (ClusterStateResponse, error) {
+	var url string
+	var retval ClusterStateResponse
+
+	url = "/_cluster/state"
+
+	body, err := c.DoCommand("GET", url, args, nil)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		// unmarshal the response JSON
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+	return retval, err
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstatresponses.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstatresponses.go
new file mode 100644
index 000000000..e9a296686
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstatresponses.go
@@ -0,0 +1,299 @@
+package elastigo
+
+type NodeStatsResponse struct {
+	ClusterName string `json:"cluster_name"`
+	Nodes       map[string]NodeStatsNodeResponse
+}
+
+type NodeStatsNodeResponse struct {
+	Name             string                                     `json:"name"`
+	Timestamp        int64                                      `json:"timestamp"`
+	TransportAddress string                                     `json:"transport_address"`
+	Hostname         string                                     `json:"hostname"`
+	Host             string                                     `json:"host"`
+	IP               []string                                   `json:"ip"`
+	Attributes       NodeStatsNodeAttributes                    `json:"attributes"`
+	Indices          NodeStatsIndicesResponse                   `json:"indices"`
+	OS               NodeStatsOSResponse                        `json:"os"`
+	Process          NodeStatsProcessResponse                   `json:"process"`
+	JVM              NodeStatsJVMResponse                       `json:"jvm"`
+	Network          NodeStatsNetworkResponse                   `json:"network"`
+	FS               NodeStatsFSResponse                        `json:"fs"`
+	ThreadPool       map[string]NodeStatsThreadPoolPoolResponse `json:"thread_pool"`
+	Transport        NodeStatsTransportResponse                 `json:"transport"`
+	FieldDataBreaker NodeStatsFieldDataBreakerResponse          `json:"fielddata_breaker"`
+}
+
+type NodeStatsNodeAttributes struct {
+	Data   string `json:"data"`
+	Client string `json:"client"`
+}
+type NodeStatsNetworkResponse struct {
+	TCP NodeStatsTCPResponse `json:"tcp"`
+}
+
+type NodeStatsFieldDataBreakerResponse struct {
+	MaximumSizeInBytes   int64   `json:"maximum_size_in_bytes"`
+	MaximumSize          string  `json:"maximum_size"`
+	EstimatedSizeInBytes int64   `json:"estimated_size_in_bytes"`
+	EstimatedSize        string  `json:"estimated_size"`
+	Overhead             float64 `json:"overhead"`
+	Tripped              int64   `json:"tripped"`
+}
+type NodeStatsTransportResponse struct {
+	ServerOpen int64 `json:"server_open"`
+	RxCount    int64 `json:"rx_count"`
+	RxSize     int64 `json:"rx_size_in_bytes"`
+	TxCount    int64 `json:"tx_count"`
+	TxSize     int64 `json:"tx_size_in_bytes"`
+}
+
+type NodeStatsThreadPoolPoolResponse struct {
+	Threads   int64 `json:"threads"`
+	Queue     int64 `json:"queue"`
+	Active    int64 `json:"active"`
+	Rejected  int64 `json:"rejected"`
+	Largest   int64 `json:"largest"`
+	Completed int64 `json:"completed"`
+}
+
+type NodeStatsTCPResponse struct {
+	ActiveOpens  int64 `json:"active_opens"`
+	PassiveOpens int64 
`json:"passive_opens"` + CurrEstab int64 `json:"curr_estab"` + InSegs int64 `json:"in_segs"` + OutSegs int64 `json:"out_segs"` + RetransSegs int64 `json:"retrans_segs"` + EstabResets int64 `json:"estab_resets"` + AttemptFails int64 `json:"attempt_fails"` + InErrs int64 `json:"in_errs"` + OutRsts int64 `json:"out_rsts"` +} + +type NodeStatsIndicesResponse struct { + Docs NodeStatsIndicesDocsResponse `json:"docs"` + Store NodeStatsIndicesStoreResponse `json:"store"` + Indexing NodeStatsIndicesIndexingResponse `json:"indexing"` + Get NodeStatsIndicesGetResponse `json:"get"` + Search NodeStatsIndicesSearchResponse `json:"search"` + Merges NodeStatsIndicesMergesResponse `json:"merges"` + Refresh NodeStatsIndicesRefreshResponse `json:"refresh"` + Flush NodeStatsIndicesFlushResponse `json:"flush"` + Warmer NodeStatsIndicesWarmerResponse `json:"warmer"` + FilterCache NodeStatsIndicesFilterCacheResponse `json:"filter_cache"` + IdCache NodeStatsIndicesIdCacheResponse `json:"id_cache"` + FieldData NodeStatsIndicesFieldDataResponse `json:"fielddata"` + Percolate NodeStatsIndicesPercolateResponse `json:"percolate"` + Completion NodeStatsIndicesCompletionResponse `json:"completion"` + Segments NodeStatsIndicesSegmentsResponse `json:"segments"` + Translog NodeStatsIndicesTranslogResponse `json:"translog"` + Suggest NodeStatsIndicesSuggestResponse `json:"suggest"` +} + +type NodeStatsIndicesDocsResponse struct { + Count int64 `json:"count"` + Deleted int64 `json:"deleted"` +} + +type NodeStatsIndicesStoreResponse struct { + Size int64 `json:"size_in_bytes"` + ThrottleTime int64 `json:"throttle_time_in_millis"` +} + +type NodeStatsIndicesIndexingResponse struct { + IndexTotal int64 `json:"index_total"` + IndexTime int64 `json:"index_time_in_millis"` + IndexCurrent int64 `json:"index_current"` + DeleteTotal int64 `json:"delete_total"` + DeleteTime int64 `json:"delete_time_in_millis"` + DeleteCurrent int64 `json:"delete_current"` +} + +type NodeStatsIndicesGetResponse struct { + Total int64 `json:"total"` + Time int64 `json:"time_in_millis"` + ExistsTotal int64 `json:"exists_total"` + ExistsTime int64 `json:"exists_time_in_millis"` + MissingTotal int64 `json:"missing_total"` + MissingTime int64 `json:"missing_time_in_millis"` + Current int64 `json:"current"` +} + +type NodeStatsIndicesSearchResponse struct { + OpenContext int64 `json:"open_contexts"` + QueryTotal int64 `json:"query_total"` + QueryTime int64 `json:"query_time_in_millis"` + QueryCurrent int64 `json:"query_current"` + FetchTotal int64 `json:"fetch_total"` + FetchTime int64 `json:"fetch_time_in_millis"` + FetchCurrent int64 `json:"fetch_current"` +} +type NodeStatsIndicesMergesResponse struct { + Current int64 `json:"current"` + CurrentDocs int64 `json:"current_docs"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes"` + Total int64 `json:"total"` + TotalTimeInMs int64 `json:"total_time_in_millis"` + TotalDocs int64 `json:"total_docs"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` +} +type NodeStatsIndicesRefreshResponse struct { + Total int64 `json:"total"` + TotalTimeInMs int64 `json:"total_time_in_millis"` +} +type NodeStatsIndicesFlushResponse struct { + Total int64 `json:"total"` + TotalTimeInMs int64 `json:"total_time_in_millis"` +} +type NodeStatsIndicesWarmerResponse struct { + Current int64 `json:"current"` + Total int64 `json:"total"` + TotalTimeInMs int64 `json:"total_time_in_millis"` +} +type NodeStatsIndicesFilterCacheResponse struct { + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 
`json:"evictions"` +} +type NodeStatsIndicesIdCacheResponse struct { + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` +} +type NodeStatsIndicesFieldDataResponse struct { + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` +} +type NodeStatsIndicesPercolateResponse struct { + Total int64 `json:"total"` + TimeInMs int64 `json:"time_in_millis"` + Current int64 `json:"current"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + MemorySize string `json:"memory_size"` + Queries int64 `json:"queries"` +} +type NodeStatsIndicesCompletionResponse struct { + SizeInBytes int64 `json:"size_in_bytes"` +} +type NodeStatsIndicesSegmentsResponse struct { + Count int64 `json:"count"` + MemoryInBytes int64 `json:"memory_in_bytes"` + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` +} +type NodeStatsIndicesTranslogResponse struct { + Operations int64 `json:"operations"` + SizeInBytes int64 `json:"size_in_bytes"` +} +type NodeStatsIndicesSuggestResponse struct { + Total int64 `json:"total"` + TimeInMs int64 `json:"time_in_millis"` + Current int64 `json:"current"` +} +type NodeStatsOSResponse struct { + Timestamp int64 `json:"timestamp"` + Uptime int64 `json:"uptime_in_millis"` + LoadAvg []float64 `json:"load_average"` + CPU NodeStatsOSCPUResponse `json:"cpu"` + Mem NodeStatsOSMemResponse `json:"mem"` + Swap NodeStatsOSSwapResponse `json:"swap"` +} + +type NodeStatsOSMemResponse struct { + Free int64 `json:"free_in_bytes"` + Used int64 `json:"used_in_bytes"` + ActualFree int64 `json:"actual_free_in_bytes"` + ActualUsed int64 `json:"actual_used_in_bytes"` +} + +type NodeStatsOSSwapResponse struct { + Used int64 `json:"used_in_bytes"` + Free int64 `json:"free_in_bytes"` +} + +type NodeStatsOSCPUResponse struct { + Sys int64 `json:"sys"` + User int64 `json:"user"` + Idle int64 `json:"idle"` + Steal int64 `json:"stolen"` +} + +type NodeStatsProcessResponse struct { + Timestamp int64 `json:"timestamp"` + OpenFD int64 `json:"open_file_descriptors"` + CPU NodeStatsProcessCPUResponse `json:"cpu"` + Memory NodeStatsProcessMemResponse `json:"mem"` +} + +type NodeStatsProcessMemResponse struct { + Resident int64 `json:"resident_in_bytes"` + Share int64 `json:"share_in_bytes"` + TotalVirtual int64 `json:"total_virtual_in_bytes"` +} + +type NodeStatsProcessCPUResponse struct { + Percent int64 `json:"percent"` + Sys int64 `json:"sys_in_millis"` + User int64 `json:"user_in_millis"` + Total int64 `json:"total_in_millis"` +} + +type NodeStatsJVMResponse struct { + Timestame int64 `json:"timestamp"` + UptimeInMs int64 `json:"uptime_in_millis"` + Mem NodeStatsJVMMemResponse `json:"mem"` + Threads NodeStatsJVMThreadsResponse `json:"threads"` + GC NodeStatsJVMGCResponse `json:"gc"` + BufferPools map[string]NodeStatsJVMBufferPoolsResponse `json:"buffer_pools"` +} + +type NodeStatsJVMMemResponse struct { + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` + HeapUsedPercent int64 `json:"heap_used_percent"` + HeapCommitedInBytes int64 `json:"heap_commited_in_bytes"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"` + NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"` + Pools map[string]NodeStatsJVMMemPoolsResponse `json:"pools"` +} +type NodeStatsJVMMemPoolsResponse struct { + UsedInBytes int64 `json:"used_in_bytes"` + MaxInBytes int64 `json:"max_in_bytes"` + PeakUsedInBytes int64 `json:"peak_used_in_bytes"` + 
PeakMaxInBytes int64 `json:"peak_max_in_bytes"` +} +type NodeStatsJVMThreadsResponse struct { + Count int64 `json:"count"` + PeakCount int64 `json:"peak_count"` +} +type NodeStatsJVMGCResponse struct { + Collectors map[string]NodeStatsJVMGCCollectorsAgeResponse `json:"collectors"` +} +type NodeStatsJVMGCCollectorsAgeResponse struct { + Count int64 `json:"collection_count"` + TimeInMs int64 `json:"collection_time_in_millis"` +} +type NodeStatsJVMBufferPoolsResponse struct { + Count int64 `json:"count"` + UsedInBytes int64 `json:"used_in_bytes"` + TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"` +} +type NodeStatsHTTPResponse struct { + CurrentOpen int64 `json:"current_open"` + TotalOpen int64 `json:"total_open"` +} + +type NodeStatsFSResponse struct { + Timestamp int64 `json:"timestamp"` + Data []NodeStatsFSDataResponse `json:"data"` +} + +type NodeStatsFSDataResponse struct { + Path string `json:"path"` + Mount string `json:"mount"` + Device string `json:"dev"` + Total int64 `json:"total_in_bytes"` + Free int64 `json:"free_in_bytes"` + Available int64 `json:"available_in_bytes"` + DiskReads int64 `json:"disk_reads"` + DiskWrites int64 `json:"disk_writes"` + DiskReadSize int64 `json:"disk_read_size_in_bytes"` + DiskWriteSize int64 `json:"disk_write_size_in_bytes"` +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterupdatesettings.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterupdatesettings.go new file mode 100644 index 000000000..f0efa1661 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterupdatesettings.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +// UpdateSettings allows to update cluster wide specific settings. Defaults to Transient setting +// Settings updated can either be persistent (applied cross restarts) or transient (will not survive a full cluster restart). 
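+// For example, a hypothetical transient update (note this helper only
+// supports integer-valued settings):
+//
+//	resp, err := conn.UpdateSettings("transient", "cluster.routing.allocation.node_concurrent_recoveries", 4)
+//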
+// http://www.elasticsearch.org/guide/reference/api/admin-cluster-update-settings.html
+func (c *Conn) UpdateSettings(settingType string, key string, value int) (ClusterSettingsResponse, error) {
+	var retval ClusterSettingsResponse
+	if settingType != "transient" && settingType != "persistent" {
+		return retval, fmt.Errorf("settingType must be one of transient or persistent, you passed %s", settingType)
+	}
+	url := "/_cluster/settings"
+	m := map[string]map[string]int{settingType: map[string]int{key: value}}
+	body, err := c.DoCommand("PUT", url, nil, m)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		// unmarshal the response JSON
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+	return retval, err
+}
+
+type ClusterSettingsResponse struct {
+	Transient  map[string]int `json:"transient"`
+	Persistent map[string]int `json:"persistent"`
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection.go
new file mode 100644
index 000000000..895eb4fcb
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection.go
@@ -0,0 +1,184 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"errors"
+	"fmt"
+	hostpool "github.com/bitly/go-hostpool"
+	"net/http"
+	"net/url"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	Version         = "0.0.2"
+	DefaultProtocol = "http"
+	DefaultDomain   = "localhost"
+	DefaultPort     = "9200"
+	// A decay duration of zero results in the default behaviour
+	DefaultDecayDuration = 0
+)
+
+type Conn struct {
+	// Maintain these for backwards compatibility
+	Protocol       string
+	Domain         string
+	ClusterDomains []string
+	Port           string
+	Username       string
+	Password       string
+	Hosts          []string
+	RequestTracer  func(method, url, body string)
+	hp             hostpool.HostPool
+	once           sync.Once
+
+	// To compute the weighting scores, we perform a weighted average of recent response times,
+	// over the course of `DecayDuration`. DecayDuration may be set to 0 to use the default
+	// value of 5 minutes. The EpsilonValueCalculator uses this to calculate a score
+	// from the weighted average response time.
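+	//
+	// Hypothetical usage: set it before the first request is issued, since the
+	// host pool is built lazily on first use:
+	//
+	//	c := NewConn()
+	//	c.DecayDuration = 10 * time.Minute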
+ DecayDuration time.Duration +} + +func NewConn() *Conn { + return &Conn{ + // Maintain these for backwards compatibility + Protocol: DefaultProtocol, + Domain: DefaultDomain, + ClusterDomains: []string{DefaultDomain}, + Port: DefaultPort, + DecayDuration: time.Duration(DefaultDecayDuration * time.Second), + } +} + +func (c *Conn) SetFromUrl(u string) error { + if u == "" { + return errors.New("Url is empty") + } + + parsedUrl, err := url.Parse(u) + if err != nil { + return err + } + + c.Protocol = parsedUrl.Scheme + host, portNum := splitHostnamePartsFromHost(parsedUrl.Host, c.Port) + c.Port = portNum + c.Domain = host + + if parsedUrl.User != nil { + c.Username = parsedUrl.User.Username() + password, passwordIsSet := parsedUrl.User.Password() + if passwordIsSet { + c.Password = password + } + } + + return nil +} + +func (c *Conn) SetPort(port string) { + c.Port = port +} + +func (c *Conn) SetHosts(newhosts []string) { + + // Store the new host list + c.Hosts = newhosts + + // Reinitialise the host pool Pretty naive as this will nuke the current + // hostpool, and therefore reset any scoring + c.initializeHostPool() +} + +// Set up the host pool to be used +func (c *Conn) initializeHostPool() { + + // If no hosts are set, fallback to defaults + if len(c.Hosts) == 0 { + c.Hosts = append(c.Hosts, fmt.Sprintf("%s:%s", c.Domain, c.Port)) + } + + // Epsilon Greedy is an algorithm that allows HostPool not only to + // track failure state, but also to learn about "better" options in + // terms of speed, and to pick from available hosts based on how well + // they perform. This gives a weighted request rate to better + // performing hosts, while still distributing requests to all hosts + // (proportionate to their performance). The interface is the same as + // the standard HostPool, but be sure to mark the HostResponse + // immediately after executing the request to the host, as that will + // stop the implicitly running request timer. 
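	//
	// Hypothetical usage (the pool is rebuilt whenever SetHosts is called):
	//
	//	c := NewConn()
	//	c.SetHosts([]string{"es1.example.org:9200", "es2.example.org:9200"})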
+	//
+	// A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132
+	if c.hp != nil {
+		c.hp.Close()
+	}
+	c.hp = hostpool.NewEpsilonGreedy(
+		c.Hosts, c.DecayDuration, &hostpool.LinearEpsilonValueCalculator{})
+}
+
+// Close shuts down the connection's host pool.
+func (c *Conn) Close() {
+	c.hp.Close()
+}
+
+// NewRequest builds an http request against the next host chosen from the pool.
+func (c *Conn) NewRequest(method, path, query string) (*Request, error) {
+	// Setup the hostpool on our first run
+	c.once.Do(c.initializeHostPool)
+
+	// Get a host from the host pool
+	hr := c.hp.Get()
+
+	// Get the final host and port
+	host, portNum := splitHostnamePartsFromHost(hr.Host(), c.Port)
+
+	// Build request
+	var uri string
+	// If query parameters are provided, then add them to the URL;
+	// otherwise, leave them out
+	if len(query) > 0 {
+		uri = fmt.Sprintf("%s://%s:%s%s?%s", c.Protocol, host, portNum, path, query)
+	} else {
+		uri = fmt.Sprintf("%s://%s:%s%s", c.Protocol, host, portNum, path)
+	}
+	req, err := http.NewRequest(method, uri, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Accept", "application/json")
+	req.Header.Add("User-Agent", "elasticSearch/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
+
+	if c.Username != "" || c.Password != "" {
+		req.SetBasicAuth(c.Username, c.Password)
+	}
+
+	newRequest := &Request{
+		Request:      req,
+		hostResponse: hr,
+	}
+	return newRequest, nil
+}
+
+// Split apart the hostname on colon
+// Return the host and a default port if there is no separator
+func splitHostnamePartsFromHost(fullHost string, defaultPortNum string) (string, string) {
+
+	h := strings.Split(fullHost, ":")
+
+	if len(h) == 2 {
+		return h[0], h[1]
+	}
+
+	return h[0], defaultPortNum
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection_test.go
new file mode 100644
index 000000000..5719b017a
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection_test.go
@@ -0,0 +1,62 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package elastigo + +import ( + "fmt" + "testing" + + "github.com/bmizerany/assert" +) + +func TestSetFromUrl(t *testing.T) { + c := NewConn() + + err := c.SetFromUrl("http://localhost") + exp := "localhost" + assert.T(t, c.Domain == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Domain)) + + c = NewConn() + + err = c.SetFromUrl("http://localhost:9200") + exp = "9200" + assert.T(t, c.Port == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Port)) + + c = NewConn() + + err = c.SetFromUrl("http://localhost:9200") + exp = "localhost" + assert.T(t, c.Domain == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Domain)) + + c = NewConn() + + err = c.SetFromUrl("http://someuser@localhost:9200") + exp = "someuser" + assert.T(t, c.Username == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Username)) + + c = NewConn() + + err = c.SetFromUrl("http://someuser:password@localhost:9200") + exp = "password" + assert.T(t, c.Password == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Password)) + + c = NewConn() + + err = c.SetFromUrl("http://someuser:password@localhost:9200") + exp = "someuser" + assert.T(t, c.Username == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Username)) + + c = NewConn() + + err = c.SetFromUrl("") + exp = "Url is empty" + assert.T(t, err != nil && err.Error() == exp, fmt.Sprintf("Expected %s, got: %s", exp, err.Error())) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk.go new file mode 100644 index 000000000..35a194fbb --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk.go @@ -0,0 +1,414 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "sync" + "sync/atomic" + "time" +) + +const ( + // Max buffer size in bytes before flushing to elasticsearch + BulkMaxBuffer = 16384 + // Max number of Docs to hold in buffer before forcing flush + BulkMaxDocs = 100 + // Max delay before forcing a flush to Elasticearch + BulkDelaySeconds = 5 + // maximum wait shutdown seconds + MAX_SHUTDOWN_SECS = 5 +) + +type ErrorBuffer struct { + Err error + Buf *bytes.Buffer +} + +// A bulk indexer creates goroutines, and channels for connecting and sending data +// to elasticsearch in bulk, using buffers. 
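+//
+// A minimal hypothetical usage sketch (error handling omitted; see the
+// examples in coreexample_test.go for more):
+//
+//	c := NewConn()
+//	indexer := c.NewBulkIndexer(4)
+//	indexer.Start()
+//	indexer.Index("twitter", "user", "1", "", "", nil, `{"name":"bob"}`)
+//	indexer.Stop()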
+type BulkIndexer struct { + conn *Conn + + // We are creating a variable defining the func responsible for sending + // to allow a mock sendor for test purposes + Sender func(*bytes.Buffer) error + + // The refresh parameter can be set to true in order to refresh the + // relevant primary and replica shards immediately after the bulk + // operation has occurred + Refresh bool + + // If we encounter an error in sending, we are going to retry for this long + // before returning an error + // if 0 it will not retry + RetryForSeconds int + + // channel for getting errors + ErrorChannel chan *ErrorBuffer + + // channel for sending to background indexer + bulkChannel chan []byte + + // numErrors is a running total of errors seen + numErrors uint64 + + // shutdown channel + shutdownChan chan chan struct{} + // channel to shutdown timer + timerDoneChan chan struct{} + + // Channel to send a complete byte.Buffer to the http sendor + sendBuf chan *bytes.Buffer + // byte buffer for docs that have been converted to bytes, but not yet sent + buf *bytes.Buffer + // Buffer for Max number of time before forcing flush + BufferDelayMax time.Duration + // Max buffer size in bytes before flushing to elasticsearch + BulkMaxBuffer int // 1048576 + // Max number of Docs to hold in buffer before forcing flush + BulkMaxDocs int // 100 + + // Number of documents we have send through so far on this session + docCt int + // Max number of http conns in flight at one time + maxConns int + // If we are indexing enough docs per bufferdelaymax, we won't need to do time + // based eviction, else we do. + needsTimeBasedFlush bool + // Lock for document writes/operations + mu sync.Mutex + // Wait Group for the http sends + sendWg *sync.WaitGroup +} + +func (b *BulkIndexer) NumErrors() uint64 { + return atomic.LoadUint64(&b.numErrors) +} + +func (c *Conn) NewBulkIndexer(maxConns int) *BulkIndexer { + b := BulkIndexer{conn: c, sendBuf: make(chan *bytes.Buffer, maxConns)} + b.needsTimeBasedFlush = true + b.buf = new(bytes.Buffer) + b.maxConns = maxConns + b.BulkMaxBuffer = BulkMaxBuffer + b.BulkMaxDocs = BulkMaxDocs + b.BufferDelayMax = time.Duration(BulkDelaySeconds) * time.Second + b.bulkChannel = make(chan []byte, 100) + b.sendWg = new(sync.WaitGroup) + b.timerDoneChan = make(chan struct{}) + return &b +} + +// A bulk indexer with more control over error handling +// @maxConns is the max number of in flight http requests +// @retrySeconds is # of seconds to wait before retrying falied requests +// +// done := make(chan bool) +// BulkIndexerGlobalRun(100, done) +func (c *Conn) NewBulkIndexerErrors(maxConns, retrySeconds int) *BulkIndexer { + b := c.NewBulkIndexer(maxConns) + b.RetryForSeconds = retrySeconds + b.ErrorChannel = make(chan *ErrorBuffer, 20) + return b +} + +// Starts this bulk Indexer running, this Run opens a go routine so is +// Non blocking +func (b *BulkIndexer) Start() { + b.shutdownChan = make(chan chan struct{}) + + go func() { + // XXX(j): Refactor this stuff to use an interface. + if b.Sender == nil { + b.Sender = b.Send + } + // Backwards compatibility + b.startHttpSender() + b.startDocChannel() + b.startTimer() + ch := <-b.shutdownChan + time.Sleep(2 * time.Millisecond) + b.Flush() + b.shutdown() + ch <- struct{}{} + }() +} + +// Stop stops the bulk indexer, blocking the caller until it is complete. +func (b *BulkIndexer) Stop() { + ch := make(chan struct{}) + b.shutdownChan <- ch + select { + case <-ch: + // done + case <-time.After(time.Second * time.Duration(MAX_SHUTDOWN_SECS)): + // timeout! 
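+		// Shutdown did not finish within MAX_SHUTDOWN_SECS; stop waiting.
+		// The background goroutine may still be flushing at this point.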
+ } +} + +func (b *BulkIndexer) PendingDocuments() int { + return b.docCt +} + +// Flush all current documents to ElasticSearch +func (b *BulkIndexer) Flush() { + b.mu.Lock() + if b.docCt > 0 { + b.send(b.buf) + } + b.mu.Unlock() +} + +func (b *BulkIndexer) startHttpSender() { + + // this sends http requests to elasticsearch it uses maxConns to open up that + // many goroutines, each of which will synchronously call ElasticSearch + // in theory, the whole set will cause a backup all the way to IndexBulk if + // we have consumed all maxConns + for i := 0; i < b.maxConns; i++ { + b.sendWg.Add(1) + go func() { + for buf := range b.sendBuf { + // Copy for the potential re-send. + bufCopy := bytes.NewBuffer(buf.Bytes()) + err := b.Sender(buf) + + // Perhaps a b.FailureStrategy(err) ?? with different types of strategies + // 1. Retry, then panic + // 2. Retry then return error and let runner decide + // 3. Retry, then log to disk? retry later? + if err != nil { + buf = bytes.NewBuffer(bufCopy.Bytes()) + if b.RetryForSeconds > 0 { + time.Sleep(time.Second * time.Duration(b.RetryForSeconds)) + err = b.Sender(bufCopy) + if err == nil { + // Successfully re-sent with no error + continue + } + } + if b.ErrorChannel != nil { + b.ErrorChannel <- &ErrorBuffer{err, buf} + } + } + } + b.sendWg.Done() + }() + } +} + +// start a timer for checking back and forcing flush ever BulkDelaySeconds seconds +// even if we haven't hit max messages/size +func (b *BulkIndexer) startTimer() { + ticker := time.NewTicker(b.BufferDelayMax) + go func() { + for { + select { + case <-ticker.C: + b.mu.Lock() + // don't send unless last sendor was the time, + // otherwise an indication of other thresholds being hit + // where time isn't needed + if b.buf.Len() > 0 && b.needsTimeBasedFlush { + b.needsTimeBasedFlush = true + b.send(b.buf) + } else if b.buf.Len() > 0 { + b.needsTimeBasedFlush = true + } + b.mu.Unlock() + case <-b.timerDoneChan: + // shutdown this go routine + ticker.Stop() + return + } + + } + }() +} + +func (b *BulkIndexer) startDocChannel() { + // This goroutine accepts incoming byte arrays from the IndexBulk function and + // writes to buffer + go func() { + for docBytes := range b.bulkChannel { + b.mu.Lock() + b.docCt += 1 + b.buf.Write(docBytes) + if b.buf.Len() >= b.BulkMaxBuffer || b.docCt >= b.BulkMaxDocs { + b.needsTimeBasedFlush = false + //log.Printf("Send due to size: docs=%d bufsize=%d", b.docCt, b.buf.Len()) + b.send(b.buf) + } + b.mu.Unlock() + } + }() +} + +func (b *BulkIndexer) send(buf *bytes.Buffer) { + //b2 := *b.buf + b.sendBuf <- buf + b.buf = new(bytes.Buffer) + // b.buf.Reset() + b.docCt = 0 +} + +func (b *BulkIndexer) shutdown() { + // This must be called after Flush() + close(b.timerDoneChan) + close(b.sendBuf) + close(b.bulkChannel) + b.sendWg.Wait() +} + +// The index bulk API adds or updates a typed JSON document to a specific index, making it searchable. 
+// it operates by buffering requests, and ocassionally flushing to elasticsearch +// http://www.elasticsearch.org/guide/reference/api/bulk.html +func (b *BulkIndexer) Index(index string, _type string, id, parent, ttl string, date *time.Time, data interface{}) error { + //{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } + by, err := WriteBulkBytes("index", index, _type, id, parent, ttl, date, data) + if err != nil { + return err + } + b.bulkChannel <- by + return nil +} + +func (b *BulkIndexer) Update(index string, _type string, id, parent, ttl string, date *time.Time, data interface{}) error { + //{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } + by, err := WriteBulkBytes("update", index, _type, id, parent, ttl, date, data) + if err != nil { + return err + } + b.bulkChannel <- by + return nil +} + +func (b *BulkIndexer) Delete(index, _type, id string) { + queryLine := fmt.Sprintf("{\"delete\":{\"_index\":%q,\"_type\":%q,\"_id\":%q}}\n", index, _type, id) + b.bulkChannel <- []byte(queryLine) + return +} + +func (b *BulkIndexer) UpdateWithWithScript(index string, _type string, id, parent, ttl string, date *time.Time, script string) error { + + var data map[string]interface{} = make(map[string]interface{}) + data["script"] = script + return b.Update(index, _type, id, parent, ttl, date, data) +} + +func (b *BulkIndexer) UpdateWithPartialDoc(index string, _type string, id, parent, ttl string, date *time.Time, partialDoc interface{}, upsert bool) error { + + var data map[string]interface{} = make(map[string]interface{}) + + data["doc"] = partialDoc + if upsert { + data["doc_as_upsert"] = true + } + return b.Update(index, _type, id, parent, ttl, date, data) +} + +// This does the actual send of a buffer, which has already been formatted +// into bytes of ES formatted bulk data +func (b *BulkIndexer) Send(buf *bytes.Buffer) error { + type responseStruct struct { + Took int64 `json:"took"` + Errors bool `json:"errors"` + Items []map[string]interface{} `json:"items"` + } + + response := responseStruct{} + + body, err := b.conn.DoCommand("POST", fmt.Sprintf("/_bulk?refresh=%t", b.Refresh), nil, buf) + + if err != nil { + atomic.AddUint64(&b.numErrors, 1) + return err + } + // check for response errors, bulk insert will give 200 OK but then include errors in response + jsonErr := json.Unmarshal(body, &response) + if jsonErr == nil { + if response.Errors { + atomic.AddUint64(&b.numErrors, uint64(len(response.Items))) + return fmt.Errorf("Bulk Insertion Error. 
Failed item count [%d]", len(response.Items)) + } + } + return nil +} + +// Given a set of arguments for index, type, id, data create a set of bytes that is formatted for bulkd index +// http://www.elasticsearch.org/guide/reference/api/bulk.html +func WriteBulkBytes(op string, index string, _type string, id, parent, ttl string, date *time.Time, data interface{}) ([]byte, error) { + // only index and update are currently supported + if op != "index" && op != "update" { + return nil, errors.New(fmt.Sprintf("Operation '%s' is not yet supported", op)) + } + + // First line + buf := bytes.Buffer{} + buf.WriteString(fmt.Sprintf(`{"%s":{"_index":"`, op)) + buf.WriteString(index) + buf.WriteString(`","_type":"`) + buf.WriteString(_type) + buf.WriteString(`"`) + if len(id) > 0 { + buf.WriteString(`,"_id":"`) + buf.WriteString(id) + buf.WriteString(`"`) + } + + if len(parent) > 0 { + buf.WriteString(`,"_parent":"`) + buf.WriteString(parent) + buf.WriteString(`"`) + } + + if op == "update" { + buf.WriteString(`,"_retry_on_conflict":3`) + } + + if len(ttl) > 0 { + buf.WriteString(`,"ttl":"`) + buf.WriteString(ttl) + buf.WriteString(`"`) + } + if date != nil { + buf.WriteString(`,"_timestamp":"`) + buf.WriteString(strconv.FormatInt(date.UnixNano()/1e6, 10)) + buf.WriteString(`"`) + } + + buf.WriteString(`}}`) + buf.WriteRune('\n') + //buf.WriteByte('\n') + switch v := data.(type) { + case *bytes.Buffer: + io.Copy(&buf, v) + case []byte: + buf.Write(v) + case string: + buf.WriteString(v) + default: + body, jsonErr := json.Marshal(data) + if jsonErr != nil { + return nil, jsonErr + } + buf.Write(body) + } + buf.WriteRune('\n') + return buf.Bytes(), nil +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk_test.go new file mode 100644 index 000000000..e352ae338 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk_test.go @@ -0,0 +1,399 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package elastigo + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "flag" + "fmt" + "log" + "net/url" + "strconv" + "sync" + "testing" + "time" + + "github.com/araddon/gou" + "github.com/bmizerany/assert" +) + +// go test -bench=".*" +// go test -bench="Bulk" + +type sharedBuffer struct { + mu sync.Mutex + Buffer []*bytes.Buffer +} + +func NewSharedBuffer() *sharedBuffer { + return &sharedBuffer{ + Buffer: make([]*bytes.Buffer, 0), + } +} + +func (b *sharedBuffer) Append(buf *bytes.Buffer) { + b.mu.Lock() + defer b.mu.Unlock() + b.Buffer = append(b.Buffer, buf) +} + +func (b *sharedBuffer) Length() int { + b.mu.Lock() + defer b.mu.Unlock() + return len(b.Buffer) +} + +func init() { + flag.Parse() + if testing.Verbose() { + gou.SetupLogging("debug") + } +} + +// take two ints, compare, need to be within 5% +func closeInt(a, b int) bool { + c := float64(a) / float64(b) + if c >= .95 && c <= 1.05 { + return true + } + return false +} + +func TestBulkIndexerBasic(t *testing.T) { + testIndex := "users" + var ( + buffers = NewSharedBuffer() + totalBytesSent int + messageSets int + ) + + InitTests(true) + c := NewTestConn() + + c.DeleteIndex(testIndex) + + indexer := c.NewBulkIndexer(3) + indexer.Sender = func(buf *bytes.Buffer) error { + messageSets += 1 + totalBytesSent += buf.Len() + buffers.Append(buf) + //log.Printf("buffer:%s", string(buf.Bytes())) + return indexer.Send(buf) + } + indexer.Start() + + date := time.Unix(1257894000, 0) + data := map[string]interface{}{ + "name": "smurfs", + "age": 22, + "date": "yesterday", + } + + err := indexer.Index(testIndex, "user", "1", "", "", &date, data) + waitFor(func() bool { + return buffers.Length() > 0 + }, 5) + + // part of request is url, so lets factor that in + //totalBytesSent = totalBytesSent - len(*eshost) + assert.T(t, buffers.Length() == 1, fmt.Sprintf("Should have sent one operation but was %d", buffers.Length())) + assert.T(t, indexer.NumErrors() == 0 && err == nil, fmt.Sprintf("Should not have any errors. 
NumErrors: %v, err: %v", indexer.NumErrors(), err)) + expectedBytes := 129 + assert.T(t, totalBytesSent == expectedBytes, fmt.Sprintf("Should have sent %v bytes but was %v", expectedBytes, totalBytesSent)) + + err = indexer.Index(testIndex, "user", "2", "", "", nil, data) + waitFor(func() bool { + return buffers.Length() > 1 + }, 5) + + // this will test to ensure that Flush actually catches a doc + indexer.Flush() + totalBytesSent = totalBytesSent - len(*eshost) + assert.T(t, err == nil, fmt.Sprintf("Should have nil error =%v", err)) + assert.T(t, buffers.Length() == 2, fmt.Sprintf("Should have another buffer ct=%d", buffers.Length())) + + assert.T(t, indexer.NumErrors() == 0, fmt.Sprintf("Should not have any errors %d", indexer.NumErrors())) + expectedBytes = 220 + assert.T(t, closeInt(totalBytesSent, expectedBytes), fmt.Sprintf("Should have sent %v bytes but was %v", expectedBytes, totalBytesSent)) + + indexer.Stop() +} + +func TestRefreshParam(t *testing.T) { + requrlChan := make(chan *url.URL, 1) + InitTests(true) + c := NewTestConn() + c.RequestTracer = func(method, urlStr, body string) { + requrl, _ := url.Parse(urlStr) + requrlChan <- requrl + } + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} + + // Now tests small batches + indexer := c.NewBulkIndexer(1) + indexer.Refresh = true + + indexer.Start() + <-time.After(time.Millisecond * 20) + + indexer.Index("users", "user", "2", "", "", &date, data) + + <-time.After(time.Millisecond * 200) + // indexer.Flush() + indexer.Stop() + requrl := <-requrlChan + assert.T(t, requrl.Query().Get("refresh") == "true", "Should have set refresh query param to true") +} + +func TestWithoutRefreshParam(t *testing.T) { + requrlChan := make(chan *url.URL, 1) + InitTests(true) + c := NewTestConn() + c.RequestTracer = func(method, urlStr, body string) { + requrl, _ := url.Parse(urlStr) + requrlChan <- requrl + } + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} + + // Now tests small batches + indexer := c.NewBulkIndexer(1) + + indexer.Start() + <-time.After(time.Millisecond * 20) + + indexer.Index("users", "user", "2", "", "", &date, data) + + <-time.After(time.Millisecond * 200) + // indexer.Flush() + indexer.Stop() + requrl := <-requrlChan + assert.T(t, requrl.Query().Get("refresh") == "false", "Should have set refresh query param to false") +} + +// currently broken in drone.io +func XXXTestBulkUpdate(t *testing.T) { + var ( + buffers = NewSharedBuffer() + totalBytesSent int + messageSets int + ) + + InitTests(true) + c := NewTestConn() + c.Port = "9200" + indexer := c.NewBulkIndexer(3) + indexer.Sender = func(buf *bytes.Buffer) error { + messageSets += 1 + totalBytesSent += buf.Len() + buffers.Append(buf) + return indexer.Send(buf) + } + indexer.Start() + + date := time.Unix(1257894000, 0) + user := map[string]interface{}{ + "name": "smurfs", "age": 22, "date": date, "count": 1, + } + + // Lets make sure the data is in the index ... + _, err := c.Index("users", "user", "5", nil, user) + + // script and params + data := map[string]interface{}{ + "script": "ctx._source.count += 2", + } + err = indexer.Update("users", "user", "5", "", "", &date, data) + // So here's the deal. Flushing does seem to work, you just have to give the + // channel a moment to recieve the message ... 
+ // <- time.After(time.Millisecond * 20) + // indexer.Flush() + + waitFor(func() bool { + return buffers.Length() > 0 + }, 5) + + indexer.Stop() + + assert.T(t, indexer.NumErrors() == 0 && err == nil, fmt.Sprintf("Should not have any errors, bulkErrorCt:%v, err:%v", indexer.NumErrors(), err)) + + response, err := c.Get("users", "user", "5", nil) + assert.T(t, err == nil, fmt.Sprintf("Should not have any errors %v", err)) + m := make(map[string]interface{}) + json.Unmarshal([]byte(*response.Source), &m) + newCount := m["count"] + assert.T(t, newCount.(float64) == 3, + fmt.Sprintf("Should have update count: %#v ... %#v", m["count"], response)) +} + +func TestBulkSmallBatch(t *testing.T) { + var ( + messageSets int + ) + + InitTests(true) + c := NewTestConn() + + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} + + // Now tests small batches + indexer := c.NewBulkIndexer(1) + indexer.BufferDelayMax = 100 * time.Millisecond + indexer.BulkMaxDocs = 2 + messageSets = 0 + indexer.Sender = func(buf *bytes.Buffer) error { + messageSets += 1 + return indexer.Send(buf) + } + indexer.Start() + <-time.After(time.Millisecond * 20) + + indexer.Index("users", "user", "2", "", "", &date, data) + indexer.Index("users", "user", "3", "", "", &date, data) + indexer.Index("users", "user", "4", "", "", &date, data) + <-time.After(time.Millisecond * 200) + // indexer.Flush() + indexer.Stop() + assert.T(t, messageSets == 2, fmt.Sprintf("Should have sent 2 message sets %d", messageSets)) + +} + +func TestBulkDelete(t *testing.T) { + InitTests(true) + var lock sync.Mutex + c := NewTestConn() + indexer := c.NewBulkIndexer(1) + sentBytes := []byte{} + + indexer.Sender = func(buf *bytes.Buffer) error { + lock.Lock() + sentBytes = append(sentBytes, buf.Bytes()...) + lock.Unlock() + return nil + } + + indexer.Start() + + indexer.Delete("fake", "fake_type", "1") + + indexer.Flush() + indexer.Stop() + + lock.Lock() + sent := string(sentBytes) + lock.Unlock() + + expected := `{"delete":{"_index":"fake","_type":"fake_type","_id":"1"}} +` + asExpected := sent == expected + assert.T(t, asExpected, fmt.Sprintf("Should have sent '%s' but actually sent '%s'", expected, sent)) +} + +func XXXTestBulkErrors(t *testing.T) { + // lets set a bad port, and hope we get a conn refused error? 
+ c := NewTestConn() + c.Port = "27845" + defer func() { + c.Port = "9200" + }() + indexer := c.NewBulkIndexerErrors(10, 1) + indexer.Start() + errorCt := 0 + go func() { + for i := 0; i < 20; i++ { + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} + indexer.Index("users", "user", strconv.Itoa(i), "", "", &date, data) + } + }() + var errBuf *ErrorBuffer + for errBuf = range indexer.ErrorChannel { + errorCt++ + break + } + if errBuf.Buf.Len() > 0 { + gou.Debug(errBuf.Err) + } + assert.T(t, errorCt > 0, fmt.Sprintf("ErrorCt should be > 0 %d", errorCt)) + indexer.Stop() +} + +/* +BenchmarkSend 18:33:00 bulk_test.go:131: Sent 1 messages in 0 sets totaling 0 bytes +18:33:00 bulk_test.go:131: Sent 100 messages in 1 sets totaling 145889 bytes +18:33:01 bulk_test.go:131: Sent 10000 messages in 100 sets totaling 14608888 bytes +18:33:05 bulk_test.go:131: Sent 20000 messages in 99 sets totaling 14462790 bytes + 20000 234526 ns/op + +*/ +func BenchmarkSend(b *testing.B) { + InitTests(true) + c := NewTestConn() + b.StartTimer() + totalBytes := 0 + sets := 0 + indexer := c.NewBulkIndexer(1) + indexer.Sender = func(buf *bytes.Buffer) error { + totalBytes += buf.Len() + sets += 1 + //log.Println("got bulk") + return indexer.Send(buf) + } + for i := 0; i < b.N; i++ { + about := make([]byte, 1000) + rand.Read(about) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "about": about} + indexer.Index("users", "user", strconv.Itoa(i), "", "", nil, data) + } + log.Printf("Sent %d messages in %d sets totaling %d bytes \n", b.N, sets, totalBytes) + if indexer.NumErrors() != 0 { + b.Fail() + } +} + +/* +TODO: this should be faster than above + +BenchmarkSendBytes 18:33:05 bulk_test.go:169: Sent 1 messages in 0 sets totaling 0 bytes +18:33:05 bulk_test.go:169: Sent 100 messages in 2 sets totaling 292299 bytes +18:33:09 bulk_test.go:169: Sent 10000 messages in 99 sets totaling 14473800 bytes + 10000 373529 ns/op + +*/ +func BenchmarkSendBytes(b *testing.B) { + InitTests(true) + c := NewTestConn() + about := make([]byte, 1000) + rand.Read(about) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "about": about} + body, _ := json.Marshal(data) + b.StartTimer() + totalBytes := 0 + sets := 0 + indexer := c.NewBulkIndexer(1) + indexer.Sender = func(buf *bytes.Buffer) error { + totalBytes += buf.Len() + sets += 1 + return indexer.Send(buf) + } + for i := 0; i < b.N; i++ { + indexer.Index("users", "user", strconv.Itoa(i), "", "", nil, body) + } + log.Printf("Sent %d messages in %d sets totaling %d bytes \n", b.N, sets, totalBytes) + if indexer.NumErrors() != 0 { + b.Fail() + } +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulkudp.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulkudp.go new file mode 100644 index 000000000..e8227651a --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulkudp.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corecount.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corecount.go new file mode 100644 index 000000000..279376d43 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corecount.go @@ -0,0 +1,45 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +type CountResponse struct { + Count int `json:"count"` + Shard Status `json:"_shards"` +} + +// Count allows the caller to easily execute a query and get the number of matches for that query. +// It can be executed across one or more indices and across one or more types. +// The query can either be provided using a simple query string as a parameter, +// or using the Query DSL defined within the request body. +// http://www.elasticsearch.org/guide/reference/api/count.html +func (c *Conn) Count(index string, _type string, args map[string]interface{}, query interface{}) (CountResponse, error) { + var url string + var retval CountResponse + url = fmt.Sprintf("/%s/%s/_count", index, _type) + body, err := c.DoCommand("GET", url, args, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredelete.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredelete.go new file mode 100644 index 000000000..e8af69d0c --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredelete.go @@ -0,0 +1,37 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +// Delete API allows to delete a typed JSON document from a specific index based on its id. 
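+// For example (hypothetical usage):
+//
+//	resp, err := conn.Delete("twitter", "user", "1", nil)
+//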
+// http://www.elasticsearch.org/guide/reference/api/delete.html +func (c *Conn) Delete(index string, _type string, id string, args map[string]interface{}) (BaseResponse, error) { + var url string + var retval BaseResponse + url = fmt.Sprintf("/%s/%s/%s", index, _type, id) + body, err := c.DoCommand("DELETE", url, args, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredeletebyquery.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredeletebyquery.go new file mode 100644 index 000000000..7d99adf4b --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredeletebyquery.go @@ -0,0 +1,57 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + "strings" +) + +// DeleteByQuery allows the caller to delete documents from one or more indices and one or more types based on a query. +// The query can either be provided using a simple query string as a parameter, or using the Query DSL defined within +// the request body. +// see: http://www.elasticsearch.org/guide/reference/api/delete-by-query.html +func (c *Conn) DeleteByQuery(indices []string, types []string, args map[string]interface{}, query interface{}) (BaseResponse, error) { + var url string + var retval BaseResponse + if len(indices) > 0 && len(types) > 0 { + url = fmt.Sprintf("/%s/%s/_query", strings.Join(indices, ","), strings.Join(types, ",")) + } else if len(indices) > 0 { + url = fmt.Sprintf("/%s/_query", strings.Join(indices, ",")) + } + body, err := c.DoCommand("DELETE", url, args, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +func buildQuery() string { + return "" +} + +type DeleteByQueryResponse struct { + Status bool `json:"ok"` + Indicies map[string]IndexStatus `json:"_indices"` +} + +type IndexStatus struct { + Shards Status `json:"_shards"` +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexample_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexample_test.go new file mode 100644 index 000000000..fed31dfee --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexample_test.go @@ -0,0 +1,52 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo_test + +import ( + "bytes" + "fmt" + "strconv" + + elastigo "github.com/mattbaird/elastigo/lib" +) + +// The simplest usage of background bulk indexing +func ExampleBulkIndexer_simple() { + c := elastigo.NewConn() + + indexer := c.NewBulkIndexerErrors(10, 60) + indexer.Start() + indexer.Index("twitter", "user", "1", "", "", nil, `{"name":"bob"}`) + indexer.Stop() +} + +// The inspecting the response +func ExampleBulkIndexer_responses() { + c := elastigo.NewConn() + + indexer := c.NewBulkIndexer(10) + // Create a custom Sender Func, to allow inspection of response/error + indexer.Sender = func(buf *bytes.Buffer) error { + // @buf is the buffer of docs about to be written + respJson, err := c.DoCommand("POST", "/_bulk", nil, buf) + if err != nil { + // handle it better than this + fmt.Println(string(respJson)) + } + return err + } + indexer.Start() + for i := 0; i < 20; i++ { + indexer.Index("twitter", "user", strconv.Itoa(i), "", "", nil, `{"name":"bob"}`) + } + indexer.Stop() +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexplain.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexplain.go new file mode 100644 index 000000000..82ec69556 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexplain.go @@ -0,0 +1,43 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +// Explain computes a score explanation for a query and a specific document. +// This can give useful feedback whether a document matches or didn’t match a specific query. +// This feature is available from version 0.19.9 and up. 
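+// For example (hypothetical usage; the query is passed as a JSON string):
+//
+//	match, err := conn.Explain("twitter", "tweet", "1", nil,
+//		`{"query":{"term":{"message":"search"}}}`)
+//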
+// see http://www.elasticsearch.org/guide/reference/api/explain.html +func (c *Conn) Explain(index string, _type string, id string, args map[string]interface{}, query string) (Match, error) { + var url string + var retval Match + if len(_type) > 0 { + url = fmt.Sprintf("/%s/%s/_explain", index, _type) + } else { + url = fmt.Sprintf("/%s/_explain", index) + } + body, err := c.DoCommand("GET", url, args, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreget.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreget.go new file mode 100644 index 000000000..b0d699582 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreget.go @@ -0,0 +1,129 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// Get allows caller to get a typed JSON document from the index based on its id. +// GET - retrieves the doc +// HEAD - checks for existence of the doc +// http://www.elasticsearch.org/guide/reference/api/get.html +// TODO: make this implement an interface +func (c *Conn) get(index string, _type string, id string, args map[string]interface{}, source *json.RawMessage) (BaseResponse, error) { + var url string + retval := BaseResponse{Source: source} + if len(_type) > 0 { + url = fmt.Sprintf("/%s/%s/%s", index, _type, id) + } else { + url = fmt.Sprintf("/%s/%s", index, id) + } + body, err := c.DoCommand("GET", url, args, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +// The get API allows to get a typed JSON document from the index based on its id. +// GET - retrieves the doc +// HEAD - checks for existence of the doc +// http://www.elasticsearch.org/guide/reference/api/get.html +// TODO: make this implement an interface +func (c *Conn) Get(index string, _type string, id string, args map[string]interface{}) (BaseResponse, error) { + return c.get(index, _type, id, args, nil) +} + +// Same as Get but with custom source type. 
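+// For example (hypothetical usage):
+//
+//	var source json.RawMessage
+//	resp, err := conn.GetCustom("twitter", "user", "1", nil, &source)
+//	// on success, resp.Source (== &source) holds the raw _source document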
+func (c *Conn) GetCustom(index string, _type string, id string, args map[string]interface{}, source *json.RawMessage) (BaseResponse, error) { + return c.get(index, _type, id, args, source) +} + +// GetSource retrieves the document by id and converts it to provided interface +func (c *Conn) GetSource(index string, _type string, id string, args map[string]interface{}, source interface{}) error { + url := fmt.Sprintf("/%s/%s/%s/_source", index, _type, id) + body, err := c.DoCommand("GET", url, args, nil) + if err == nil { + err = json.Unmarshal(body, &source) + } + return err +} + +// ExistsBool allows caller to check for the existence of a document using HEAD +// TODO(shutej): This looks redundant with the Exists function in +// baserequest.go, check with mattbaird@. +func (c *Conn) ExistsBool(index string, _type string, id string, args map[string]interface{}) (bool, error) { + + var url string + + query, err := Escape(args) + if err != nil { + return false, err + } + + if len(_type) > 0 { + url = fmt.Sprintf("/%s/%s/%s", index, _type, id) + } else { + url = fmt.Sprintf("/%s/%s", index, id) + } + + req, err := c.NewRequest("HEAD", url, query) + if err != nil { + return false, err + } + + httpStatusCode, _, err := req.Do(nil) + + // RecordNotFound is the expected response for a non-existent document, + // so we don't return an error to our caller + if err == RecordNotFound { + return false, nil + } + + return httpStatusCode == http.StatusOK, err +} + +// ExistsIndex allows caller to check for the existence of an index or a type using HEAD +func (c *Conn) ExistsIndex(index string, _type string, args map[string]interface{}) (bool, error) { + var url string + + query, err := Escape(args) + if err != nil { + return false, err + } + + if len(_type) > 0 { + url = fmt.Sprintf("/%s/%s", index, _type) + } else { + url = fmt.Sprintf("/%s", index) + } + req, err := c.NewRequest("HEAD", url, query) + httpStatusCode, _, err := req.Do(nil) + + if err != nil { + return false, err + } + if httpStatusCode == http.StatusOK { + return true, err + } + return false, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreindex.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreindex.go new file mode 100644 index 000000000..7ba5031e0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreindex.go @@ -0,0 +1,132 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "strconv" +) + +// Index adds or updates a typed JSON document in a specific index, making it searchable, creating an index +// if it did not exist. 
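+//
+// A minimal sketch (index, type, id, and document are illustrative; error
+// handling elided):
+//
+//	_, err := c.Index("twitter", "tweet", "1", nil,
+//		`{"user": "bob", "message": "hello"}`)
+//
+// Parameter notes: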
+// if id is omitted, op_type 'create' will be passed and http method will default to "POST"
+// _type is optional
+// id is optional
+// parentId is optional
+// version is optional
+// op_type is optional
+// routing is optional
+// timestamp is optional
+// ttl is optional
+// percolate is optional
+// timeout is optional
+// http://www.elasticsearch.org/guide/reference/api/index_.html
+func (c *Conn) Index(index string, _type string, id string, args map[string]interface{}, data interface{}) (BaseResponse, error) {
+	return c.IndexWithParameters(index, _type, id, "", 0, "", "", "", 0, "", "", false, args, data)
+}
+
+// IndexWithParameters takes all the potential parameters available
+func (c *Conn) IndexWithParameters(index string, _type string, id string, parentId string, version int, op_type string,
+	routing string, timestamp string, ttl int, percolate string, timeout string, refresh bool,
+	args map[string]interface{}, data interface{}) (BaseResponse, error) {
+	var url string
+	var retval BaseResponse
+	url, err := GetIndexUrl(index, _type, id, parentId, version, op_type, routing, timestamp, ttl, percolate, timeout, refresh)
+	if err != nil {
+		return retval, err
+	}
+	var method string
+	if len(id) == 0 {
+		method = "POST"
+	} else {
+		method = "PUT"
+	}
+	body, err := c.DoCommand(method, url, args, data)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		// marshall into json
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+	return retval, err
+}
+
+func GetIndexUrl(index string, _type string, id string, parentId string, version int, op_type string,
+	routing string, timestamp string, ttl int, percolate string, timeout string, refresh bool) (retval string, e error) {
+
+	if len(index) == 0 {
+		return "", errors.New("index can not be blank")
+	}
+	var partialURL string
+	var values url.Values = url.Values{}
+	if len(_type) == 0 && len(id) > 0 {
+		e = errors.New("Can't specify id when _type is blank")
+		return
+	}
+	if len(_type) > 0 && len(id) > 0 {
+		partialURL = fmt.Sprintf("/%s/%s/%s", index, _type, id)
+	} else if len(_type) > 0 {
+		partialURL = fmt.Sprintf("/%s/%s", index, _type)
+	} else {
+		partialURL = fmt.Sprintf("/%s", index)
+	}
+	// A child document can be indexed by specifying its parent when indexing.
+	if len(parentId) > 0 {
+		values.Add("parent", parentId)
+	}
+	// versions start at 1, so if greater than 0
+	if version > 0 {
+		values.Add("version", strconv.Itoa(version))
+	}
+	if len(op_type) > 0 {
+		if len(id) == 0 {
+			//if id is omitted, op_type defaults to 'create'
+			values.Add("op_type", "create")
+		} else {
+			values.Add("op_type", op_type)
+		}
+	}
+	if len(routing) > 0 {
+		values.Add("routing", routing)
+	}
+	// A document can be indexed with a timestamp associated with it.
+	// The timestamp value of a document can be set using the timestamp parameter.
+	if len(timestamp) > 0 {
+		values.Add("timestamp", timestamp)
+	}
+	// A document can be indexed with a ttl (time to live) associated with it. Expired documents
+	// will be expunged automatically.
+	if ttl > 0 {
+		values.Add("ttl", strconv.Itoa(ttl))
+	}
+	if len(percolate) > 0 {
+		values.Add("percolate", percolate)
+	}
+	// example 5m
+	if len(timeout) > 0 {
+		values.Add("timeout", timeout)
+	}
+
+	if refresh {
+		values.Add("refresh", "true")
+	}
+
+	partialURL += "?" + values.Encode()
+	return partialURL, nil
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremget.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremget.go
new file mode 100644
index 000000000..1fece7fc3
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremget.go
@@ -0,0 +1,62 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// MGet allows the caller to get multiple documents based on an index, type (optional) and id (and possibly routing).
+// The response includes a docs array with all the fetched documents, each element similar in structure to a document
+// provided by the get API.
+// see http://www.elasticsearch.org/guide/reference/api/multi-get.html
+func (c *Conn) MGet(index string, _type string, mgetRequest MGetRequestContainer, args map[string]interface{}) (MGetResponseContainer, error) {
+	var url string
+	var retval MGetResponseContainer
+	if len(index) <= 0 {
+		url = fmt.Sprintf("/_mget")
+	}
+	if len(_type) > 0 && len(index) > 0 {
+		url = fmt.Sprintf("/%s/%s/_mget", index, _type)
+	} else if len(index) > 0 {
+		url = fmt.Sprintf("/%s/_mget", index)
+	}
+	body, err := c.DoCommand("GET", url, args, mgetRequest)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		// marshall into json
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+	return retval, err
+}
+
+type MGetRequestContainer struct {
+	Docs []MGetRequest `json:"docs"`
+}
+
+type MGetRequest struct {
+	Index  string   `json:"_index"`
+	Type   string   `json:"_type"`
+	ID     string   `json:"_id"`
+	IDS    []string `json:"_ids,omitempty"`
+	Fields []string `json:"fields,omitempty"`
+}
+
+type MGetResponseContainer struct {
+	Docs []BaseResponse `json:"docs"`
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremorelikethis.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremorelikethis.go
new file mode 100644
index 000000000..0c2fc4c36
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremorelikethis.go
@@ -0,0 +1,57 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// MoreLikeThis allows the caller to get documents that are “like” a specified document.
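+//
+// A sketch of a call (index and field names illustrative; the MLT struct is
+// defined below):
+//
+//	q := MoreLikeThisQuery{
+//		MoreLikeThis: MLT{Fields: []string{"message"}, MinTermFrequency: 1, MaxQueryTerms: 12},
+//	}
+//	res, err := c.MoreLikeThis("twitter", "tweet", "1", nil, q)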
+// http://www.elasticsearch.org/guide/reference/api/more-like-this.html +func (c *Conn) MoreLikeThis(index string, _type string, id string, args map[string]interface{}, query MoreLikeThisQuery) (BaseResponse, error) { + var url string + var retval BaseResponse + url = fmt.Sprintf("/%s/%s/%s/_mlt", index, _type, id) + body, err := c.DoCommand("GET", url, args, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +type MoreLikeThisQuery struct { + MoreLikeThis MLT `json:"more_like_this"` +} + +type MLT struct { + Fields []string `json:"fields"` + LikeText string `json:"like_text"` + PercentTermsToMatch float32 `json:"percent_terms_to_match"` + MinTermFrequency int `json:"min_term_freq"` + MaxQueryTerms int `json:"max_query_terms"` + StopWords []string `json:"stop_words"` + MinDocFrequency int `json:"min_doc_freq"` + MaxDocFrequency int `json:"max_doc_freq"` + MinWordLength int `json:"min_word_len"` + MaxWordLength int `json:"max_word_len"` + BoostTerms int `json:"boost_terms"` + Boost float32 `json:"boost"` + Analyzer string `json:"analyzer"` +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremsearch.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremsearch.go new file mode 100644 index 000000000..e8227651a --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremsearch.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate.go new file mode 100644 index 000000000..cc38bfa21 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate.go @@ -0,0 +1,64 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
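+
+// A usage sketch for the percolate API defined below (index name, id, and
+// JSON bodies are illustrative, not from the upstream docs):
+//
+//	_, err := c.RegisterPercolate("test-index", "perc-1",
+//		`{"query": {"match": {"message": "bonsai tree"}}}`)
+//	if err == nil {
+//		res, _ := c.Percolate("test-index", "doc", "", nil,
+//			`{"doc": {"message": "a new bonsai tree"}}`)
+//		fmt.Println(len(res.Matches))
+//	}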
+ +package elastigo + +import ( + "encoding/json" + "fmt" +) + +type PercolatorResult struct { + SearchResult + Matches []PercolatorMatch `json:"matches"` +} + +type PercolatorMatch struct { + Index string `json:"_index"` + Id string `json:"_id"` +} + +// See http://www.elasticsearch.org/guide/reference/api/percolate.html +func (c *Conn) RegisterPercolate(index string, id string, data interface{}) (BaseResponse, error) { + var url string + var retval BaseResponse + url = fmt.Sprintf("/%s/.percolator/%s", index, id) + body, err := c.DoCommand("PUT", url, nil, data) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +func (c *Conn) Percolate(index string, _type string, name string, args map[string]interface{}, doc string) (PercolatorResult, error) { + var url string + var retval PercolatorResult + url = fmt.Sprintf("/%s/%s/_percolate", index, _type) + body, err := c.DoCommand("GET", url, args, doc) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate_test.go new file mode 100644 index 000000000..55a0713a1 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate_test.go @@ -0,0 +1,64 @@ +package elastigo + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +const ( + percIndexName = "test-perc-index" +) + +func TestPercolate(t *testing.T) { + Convey("With a registered percolator", t, func() { + c := NewTestConn() + _, createErr := c.CreateIndex(percIndexName) + So(createErr, ShouldBeNil) + defer c.DeleteIndex(percIndexName) + + options := `{ + "percType": { + "properties": { + "message": { + "type": "string" + } + } + } + }` + + err := c.PutMappingFromJSON(percIndexName, "percType", []byte(options)) + So(err, ShouldBeNil) + + data := `{ + "query": { + "match": { + "message": "bonsai tree" + } + } + }` + + _, err = c.RegisterPercolate(percIndexName, "PERCID", data) + So(err, ShouldBeNil) + + Convey("That matches the document", func() { + // Should return the percolator id (registered query) + doc := `{"doc": { "message": "A new bonsai tree in the office" }}` + + result, err := c.Percolate(percIndexName, "percType", "", nil, doc) + So(err, ShouldBeNil) + So(len(result.Matches), ShouldEqual, 1) + match := result.Matches[0] + So(match.Id, ShouldEqual, "PERCID") + So(match.Index, ShouldEqual, percIndexName) + }) + + Convey("That does not match the document", func() { + // Should NOT return the percolator id (registered query) + doc := `{"doc": { "message": "Barren wasteland with no matches" }}` + + result, err := c.Percolate(percIndexName, "percType", "", nil, doc) + So(err, ShouldBeNil) + So(len(result.Matches), ShouldEqual, 0) + }) + }) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch.go new file mode 100644 index 000000000..0d83c9c90 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch.go @@ -0,0 +1,246 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +// Search performs a very basic search on an index via the request URI API. +// +// params: +// @index: the elasticsearch index +// @_type: optional ("" if not used) search specific type in this index +// @args: a map of URL parameters. Allows all the URI-request parameters allowed by ElasticSearch. +// @query: this can be one of 3 types: +// 1) string value that is valid elasticsearch +// 2) io.Reader that can be set in body (also valid elasticsearch string syntax..) +// 3) other type marshalable to json (also valid elasticsearch json) +// +// out, err := Search(true, "github", map[string]interface{} {"from" : 10}, qryType) +// +// http://www.elasticsearch.org/guide/reference/api/search/uri-request.html +func (c *Conn) Search(index string, _type string, args map[string]interface{}, query interface{}) (SearchResult, error) { + var uriVal string + var retval SearchResult + if len(_type) > 0 && _type != "*" { + uriVal = fmt.Sprintf("/%s/%s/_search", index, _type) + } else { + uriVal = fmt.Sprintf("/%s/_search", index) + } + body, err := c.DoCommand("POST", uriVal, args, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal([]byte(body), &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + retval.RawJSON = body + return retval, err +} + +func (c *Conn) Suggest(index string, args map[string]interface{}, query interface{}) (SuggestResults, error) { + uriVal := fmt.Sprintf("/%s/_suggest", index) + body, err := c.DoCommand("POST", uriVal, args, query) + var retval SuggestResults + if err != nil { + return retval, err + } + jsonErr := json.Unmarshal([]byte(body), &retval.body) + if jsonErr != nil { + return retval, jsonErr + } + shards := retval.body["_shards"] + if shards == nil { + return retval, fmt.Errorf("Expect response to contain _shards field, got: %s", body) + } + jsonErr = json.Unmarshal(shards, &retval.ShardStatus) + if jsonErr != nil { + return retval, jsonErr + } + if len(retval.ShardStatus.Failures) > 0 { + return retval, fmt.Errorf("Got the following errors:\n%s", failures(retval.ShardStatus.Failures)) + } + return retval, nil +} + +type SuggestResults struct { + body map[string]json.RawMessage + ShardStatus Status +} + +func (s SuggestResults) Result(suggestName string) ([]Suggestion, error) { + var suggestions []Suggestion + query := s.body[suggestName] + if query == nil { + return nil, fmt.Errorf("No such suggest name found") + } + err := json.Unmarshal(query, &suggestions) + if err != nil { + return nil, err + } + return suggestions, nil +} + +// SearchUri performs the simplest possible query in url string +// params: +// @index: the elasticsearch index +// @_type: optional ("" if not used) search specific type in this index +// @args: a map of URL parameters. 
Most important one is q +// +// out, err := SearchUri("github","", map[string]interface{} { "q" : `user:kimchy`}) +// +// produces a request like this: host:9200/github/_search?q=user:kimchy" +// +// http://www.elasticsearch.org/guide/reference/api/search/uri-request.html +func (c *Conn) SearchUri(index, _type string, args map[string]interface{}) (SearchResult, error) { + var uriVal string + var retval SearchResult + if len(_type) > 0 && _type != "*" { + uriVal = fmt.Sprintf("/%s/%s/_search", index, _type) + } else { + uriVal = fmt.Sprintf("/%s/_search", index) + } + //log.Println(uriVal) + body, err := c.DoCommand("GET", uriVal, args, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal([]byte(body), &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + retval.RawJSON = body + return retval, err +} + +func (c *Conn) Scroll(args map[string]interface{}, scroll_id string) (SearchResult, error) { + var url string + var retval SearchResult + + if _, ok := args["scroll"]; !ok { + return retval, fmt.Errorf("Cannot call scroll without 'scroll' in arguments") + } + + url = "/_search/scroll" + + body, err := c.DoCommand("POST", url, args, scroll_id) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal([]byte(body), &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +type SuggestionOption struct { + Payload json.RawMessage `json:"payload"` + Score Float32Nullable `json:"score,omitempty"` + Text string `json:"text"` +} + +type Suggestion struct { + Length int `json:"length"` + Offset int `json:"offset"` + Options []SuggestionOption `json:"options"` + Text string `json:"text"` +} + +type Suggestions map[string][]Suggestion + +type SearchResult struct { + RawJSON []byte + Took int `json:"took"` + TimedOut bool `json:"timed_out"` + ShardStatus Status `json:"_shards"` + Hits Hits `json:"hits"` + Facets json.RawMessage `json:"facets,omitempty"` // structure varies on query + ScrollId string `json:"_scroll_id,omitempty"` + Aggregations json.RawMessage `json:"aggregations,omitempty"` // structure varies on query + Suggestions Suggestions `json:"suggest,omitempty"` +} + +func (s *SearchResult) String() string { + return fmt.Sprintf("", s.Took, s.TimedOut, s.Hits.Total) +} + +type Hits struct { + Total int `json:"total"` + // MaxScore float32 `json:"max_score"` + Hits []Hit `json:"hits"` +} + +func (h *Hits) Len() int { + return len(h.Hits) +} + +type Highlight map[string][]string + +type Hit struct { + Index string `json:"_index"` + Type string `json:"_type,omitempty"` + Id string `json:"_id"` + Score Float32Nullable `json:"_score,omitempty"` // Filters (no query) dont have score, so is null + Source *json.RawMessage `json:"_source"` // marshalling left to consumer + Fields *json.RawMessage `json:"fields"` // when a field arg is passed to ES, instead of _source it returns fields + Explanation *Explanation `json:"_explanation,omitempty"` + Highlight *Highlight `json:"highlight,omitempty"` + Sort []interface{} `json:"sort,omitempty"` +} + +func (e *Explanation) String(indent string) string { + if len(e.Details) == 0 { + return fmt.Sprintf("%s>>> %v = %s", indent, e.Value, strings.Replace(e.Description, "\n", "", -1)) + } else { + detailStrs := make([]string, 0) + for _, detail := range e.Details { + detailStrs = append(detailStrs, fmt.Sprintf("%s", detail.String(indent+"| "))) + } + return fmt.Sprintf("%s%v = %s(\n%s\n%s)", indent, 
e.Value, strings.Replace(e.Description, "\n", "", -1), strings.Join(detailStrs, "\n"), indent) + } +} + +// Elasticsearch returns some invalid (according to go) json, with floats having... +// +// json: cannot unmarshal null into Go value of type float32 (see last field.) +// +// "hits":{"total":6808,"max_score":null, +// "hits":[{"_index":"10user","_type":"user","_id":"751820","_score":null, +type Float32Nullable float32 + +func (i *Float32Nullable) UnmarshalJSON(data []byte) error { + if len(data) == 0 || string(data) == "null" { + return nil + } + + if in, err := strconv.ParseFloat(string(data), 32); err != nil { + return err + } else { + *i = Float32Nullable(in) + } + return nil +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch_test.go new file mode 100644 index 000000000..a16d6fec2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch_test.go @@ -0,0 +1,83 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +type SuggestTest struct { + Completion string `json:"completion"` +} + +type hash map[string]interface{} + +func TestCoreSearch(t *testing.T) { + + c := NewTestConn() + c.CreateIndex("github") + waitFor(func() bool { return false }, 5) + + defer func() { + c.DeleteIndex("github") + }() + + Convey("Convert a search result to JSON", t, func() { + + qry := map[string]interface{}{ + "query": map[string]interface{}{ + "wildcard": map[string]string{"actor": "a*"}, + }, + } + var args map[string]interface{} + out, err := c.Search("github", "", args, qry) + So(err, ShouldBeNil) + + _, err = json.Marshal(out.Hits.Hits) + So(err, ShouldBeNil) + }) + + Convey("Update a document and verify that it is reflected", t, func() { + mappingOpts := MappingOptions{Properties: hash{ + "completion": hash{ + "type": "completion", + }, + }} + err := c.PutMapping("github", "SuggestTest", SuggestTest{}, mappingOpts) + So(err, ShouldBeNil) + + _, err = c.UpdateWithPartialDoc("github", "SuggestTest", "1", nil, SuggestTest{"foobar"}, true) + So(err, ShouldBeNil) + + query := hash{"completion_completion": hash{ + "text": "foo", + "completion": hash{ + "size": 10, + "field": "completion", + }, + }} + + _, err = c.Refresh("github") + So(err, ShouldBeNil) + + res, err := c.Suggest("github", nil, query) + So(err, ShouldBeNil) + + opts, err := res.Result("completion_completion") + So(err, ShouldBeNil) + + So(len(opts[0].Options), ShouldBeGreaterThan, 0) + So(opts[0].Options[0].Text, ShouldEqual, "foobar") + }) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coretest_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coretest_test.go new file mode 100644 index 000000000..37aa3fc98 --- /dev/null +++ 
b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coretest_test.go @@ -0,0 +1,198 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/md5" + "encoding/json" + "flag" + "fmt" + "io" + "log" + "net/http" + "time" +) + +/* + +usage: + + test -v -host eshost -loaddata + +*/ + +const ( + testIndex = "github" +) + +var ( + bulkStarted bool + hasStartedTesting bool + hasLoadedData bool + sleepAfterLoad bool + loadData *bool = flag.Bool("loaddata", false, "This loads a bunch of test data into elasticsearch for testing") + sleep *int = flag.Int("sleep", 0, "Post bulk loading sleep test to make drone.io work") +) + +func InitTests(startIndexer bool) *Conn { + c := NewConn() + + if !hasStartedTesting { + flag.Parse() + hasStartedTesting = true + log.SetFlags(log.Ltime | log.Lshortfile) + c.Domain = *eshost + } + if startIndexer && !bulkStarted { + bulkStarted = true + b := c.NewBulkIndexer(100) + b.Start() + if *loadData && !hasLoadedData { + log.Println("loading test data ") + hasLoadedData = true + LoadTestData() + } + b.Stop() + } + c.Flush("_all") + c.Refresh("_all") + if !sleepAfterLoad { + time.Sleep(time.Duration(*sleep) * time.Second) + } + sleepAfterLoad = true + return c +} + +func NewTestConn() *Conn { + c := NewConn() + c.Domain = *eshost + return c +} + +// Wait for condition (defined by func) to be true, a utility to create a ticker +// checking every 100 ms to see if something (the supplied check func) is done +// +// waitFor(func() bool { +// return ctr.Ct == 0 +// }, 10) +// +// @timeout (in seconds) is the last arg +func waitFor(check func() bool, timeoutSecs int) { + timer := time.NewTicker(100 * time.Millisecond) + tryct := 0 + for _ = range timer.C { + if check() { + timer.Stop() + break + } + if tryct >= timeoutSecs*10 { + timer.Stop() + break + } + tryct++ + } +} + +type GithubEvent struct { + Url string + Created time.Time `json:"created_at"` + Type string +} + +// This loads test data from github archives (~6700 docs) +func LoadTestData() { + c := NewConn() + c.Domain = *eshost + + c.DeleteIndex(testIndex) + + docCt := 0 + errCt := 0 + indexer := c.NewBulkIndexer(1) + indexer.Sender = func(buf *bytes.Buffer) error { + // log.Printf("Sent %d bytes total %d docs sent", buf.Len(), docCt) + req, err := c.NewRequest("POST", "/_bulk", "") + if err != nil { + errCt += 1 + log.Fatalf("ERROR: %v", err) + return err + } + req.SetBody(buf) + // res, err := http.DefaultClient.Do(*(api.Request(req))) + var response map[string]interface{} + httpStatusCode, _, err := req.Do(&response) + if err != nil { + errCt += 1 + log.Fatalf("ERROR: %v", err) + return err + } + if httpStatusCode != 200 { + log.Fatalf("Not 200! 
%d %q\n", httpStatusCode, buf.String()) + } + return nil + } + indexer.Start() + resp, err := http.Get("http://data.githubarchive.org/2012-12-10-15.json.gz") + if err != nil || resp == nil { + panic("Could not download data") + } + defer resp.Body.Close() + if err != nil { + log.Println(err) + return + } + gzReader, err := gzip.NewReader(resp.Body) + defer gzReader.Close() + if err != nil { + panic(err) + } + r := bufio.NewReader(gzReader) + var ge GithubEvent + docsm := make(map[string]bool) + h := md5.New() + for { + line, err := r.ReadBytes('\n') + if err != nil { + if err == io.EOF { + indexer.Flush() + break + } + log.Fatalf("could not read line: %v", err) + } + if err := json.Unmarshal(line, &ge); err == nil { + // create an "ID" + h.Write(line) + id := fmt.Sprintf("%x", h.Sum(nil)) + if _, ok := docsm[id]; ok { + log.Println("HM, already exists? ", ge.Url) + } + docsm[id] = true + indexer.Index(testIndex, ge.Type, id, "", "", &ge.Created, line) + docCt++ + } else { + log.Println("ERROR? ", string(line)) + } + } + if errCt != 0 { + log.Println("FATAL, could not load ", errCt) + } + // lets wait a bit to ensure that elasticsearch finishes? + indexer.Stop() + if len(docsm) != docCt { + panic(fmt.Sprintf("Docs didn't match? %d:%d", len(docsm), docCt)) + } + c.Flush(testIndex) + c.Refresh(testIndex) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreupdate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreupdate.go new file mode 100644 index 000000000..f6abbc14c --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreupdate.go @@ -0,0 +1,94 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +// Update updates a document based on a script provided. The operation gets the document +// (collocated with the shard) from the index, runs the script (with optional script language and parameters), +// and index back the result (also allows to delete, or ignore the operation). It uses versioning to make sure +// no updates have happened during the “get” and “reindex”. (available from 0.19 onwards). +// Note, this operation still means full reindex of the document, it just removes some network roundtrips +// and reduces chances of version conflicts between the get and the index. The _source field need to be enabled +// for this feature to work. 
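+//
+// A sketch (the script and params are illustrative, not from the upstream docs):
+//
+//	data := map[string]interface{}{
+//		"script": "ctx._source.counter += count",
+//		"params": map[string]interface{}{"count": 4},
+//	}
+//	_, err := c.Update("twitter", "tweet", "1", nil, data)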
+// +// http://www.elasticsearch.org/guide/reference/api/update.html +// TODO: finish this, it's fairly complex +func (c *Conn) Update(index string, _type string, id string, args map[string]interface{}, data interface{}) (BaseResponse, error) { + var url string + var retval BaseResponse + + url = fmt.Sprintf("/%s/%s/%s/_update", index, _type, id) + body, err := c.DoCommand("POST", url, args, data) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +// UpdateWithPartialDoc updates a document based on partial document provided. The update API also +// support passing a partial document (since 0.20), which will be merged into the existing +// document (simple recursive merge, inner merging of objects, replacing core "keys/values" and arrays). +// If both doc and script is specified, then doc is ignored. Best is to put your field pairs of the partial +// document in the script itself. +// +// http://www.elasticsearch.org/guide/reference/api/update.html +func (c *Conn) UpdateWithPartialDoc(index string, _type string, id string, args map[string]interface{}, doc interface{}, upsert bool) (BaseResponse, error) { + switch v := doc.(type) { + case string: + upsertStr := "" + if upsert { + upsertStr = ", \"doc_as_upsert\":true" + } + content := fmt.Sprintf("{\"doc\":%s %s}", v, upsertStr) + return c.Update(index, _type, id, args, content) + } + var data map[string]interface{} = make(map[string]interface{}) + data["doc"] = doc + if upsert { + data["doc_as_upsert"] = true + } + return c.Update(index, _type, id, args, data) +} + +// UpdateWithScript updates a document based on a script provided. +// The operation gets the document (collocated with the shard) from the index, runs the script +// (with optional script language and parameters), and index back the result (also allows to +// delete, or ignore the operation). It uses versioning to make sure no updates have happened +// during the "get" and "reindex". (available from 0.19 onwards). +// +// Note, this operation still means full reindex of the document, it just removes some network +// roundtrips and reduces chances of version conflicts between the get and the index. The _source +// field need to be enabled for this feature to work. +// http://www.elasticsearch.org/guide/reference/api/update.html +func (c *Conn) UpdateWithScript(index string, _type string, id string, args map[string]interface{}, script string, params interface{}) (BaseResponse, error) { + switch v := params.(type) { + case string: + paramsPart := fmt.Sprintf("{\"params\":%s}", v) + data := fmt.Sprintf("{\"script\":\"%s\", \"params\":%s}", script, paramsPart) + return c.Update(index, _type, id, args, data) + } + var data map[string]interface{} = make(map[string]interface{}) + data["params"] = params + data["script"] = script + return c.Update(index, _type, id, args, data) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corevalidate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corevalidate.go new file mode 100644 index 000000000..d1881584f --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corevalidate.go @@ -0,0 +1,53 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +// Validate allows a user to validate a potentially expensive query without executing it. +// see http://www.elasticsearch.org/guide/reference/api/validate.html +func (c *Conn) Validate(index string, _type string, args map[string]interface{}) (BaseResponse, error) { + var url string + var retval BaseResponse + if len(_type) > 0 { + url = fmt.Sprintf("/%s/%s/_validate/", index, _type) + } else { + url = fmt.Sprintf("/%s/_validate/", index) + } + body, err := c.DoCommand("GET", url, args, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +type Validation struct { + Valid bool `json:"valid"` + Shards Status `json:"_shards"` + Explainations []Explaination `json:"explanations,omitempty"` +} + +type Explaination struct { + Index string `json:"index"` + Valid bool `json:"valid"` + Error string `json:"error"` +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/error.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/error.go new file mode 100644 index 000000000..a3a54c660 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/error.go @@ -0,0 +1,8 @@ +package elastigo + +import ( + "errors" +) + +// 404 Response. +var RecordNotFound = errors.New("record not found") diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesaliases.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesaliases.go new file mode 100644 index 000000000..0328ed9c2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesaliases.go @@ -0,0 +1,65 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http:www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +type JsonAliases struct { + Actions []JsonAliasAdd `json:"actions"` +} + +type JsonAliasAdd struct { + Add JsonAlias `json:"add"` +} + +type JsonAlias struct { + Index string `json:"index"` + Alias string `json:"alias"` +} + +// The API allows you to create an index alias through an API. 
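+//
+// A usage sketch (index and alias names illustrative):
+//
+//	_, err := c.AddAlias("logs-2013-01-01", "logs-current")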
+func (c *Conn) AddAlias(index string, alias string) (BaseResponse, error) { + var url string + var retval BaseResponse + + if len(index) > 0 { + url = "/_aliases" + } else { + return retval, fmt.Errorf("You must specify an index to create the alias on") + } + + jsonAliases := JsonAliases{} + jsonAliasAdd := JsonAliasAdd{} + jsonAliasAdd.Add.Alias = alias + jsonAliasAdd.Add.Index = index + jsonAliases.Actions = append(jsonAliases.Actions, jsonAliasAdd) + requestBody, err := json.Marshal(jsonAliases) + + if err != nil { + return retval, err + } + + body, err := c.DoCommand("POST", url, nil, requestBody) + if err != nil { + return retval, err + } + + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesanalyze.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesanalyze.go new file mode 100644 index 000000000..70d9e7ef2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesanalyze.go @@ -0,0 +1,55 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "errors" + "fmt" +) + +// AnalyzeIndices performs the analysis process on a text and return the tokens breakdown of the text. +// http://www.elasticsearch.org/guide/reference/api/admin-indices-analyze/ +func (c *Conn) AnalyzeIndices(index string, args map[string]interface{}) (AnalyzeResponse, error) { + var retval AnalyzeResponse + if len(args["text"].(string)) == 0 { + return retval, errors.New("text to analyze must not be blank") + } + var analyzeUrl string = "/_analyze" + if len(index) > 0 { + analyzeUrl = fmt.Sprintf("/%s/%s", index, analyzeUrl) + } + + body, err := c.DoCommand("GET", analyzeUrl, args, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +type AnalyzeResponse struct { + Tokens []Token `json:"tokens"` +} +type Token struct { + Name string `json:"token"` + StartOffset int `json:"start_offset"` + EndOffset int `json:"end_offset"` + Type string `json:"type"` + Position int `json:"position"` +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesclearcache.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesclearcache.go new file mode 100644 index 000000000..93806ac71 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesclearcache.go @@ -0,0 +1,44 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ClearCache allows to clear either all caches or specific cached associated with one ore more indices. +// see http://www.elasticsearch.org/guide/reference/api/admin-indices-clearcache/ +func (c *Conn) ClearCache(clearId bool, clearBloom bool, args map[string]interface{}, indices ...string) (ExtendedStatus, error) { + var retval ExtendedStatus + var clearCacheUrl string + if len(indices) > 0 { + clearCacheUrl = fmt.Sprintf("/%s/_cache/clear", strings.Join(indices, ",")) + + } else { + clearCacheUrl = fmt.Sprintf("/_cache/clear") + } + + body, err := c.DoCommand("POST", clearCacheUrl, args, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicescreateindex.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicescreateindex.go new file mode 100644 index 000000000..e9b876ef5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicescreateindex.go @@ -0,0 +1,77 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// The create API allows you to create an indices through an API. +func (c *Conn) CreateIndex(index string) (BaseResponse, error) { + var url string + var retval BaseResponse + + if len(index) > 0 { + url = fmt.Sprintf("/%s", index) + } else { + return retval, fmt.Errorf("You must specify an index to create") + } + + body, err := c.DoCommand("PUT", url, nil, nil) + if err != nil { + return retval, err + } + + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + + return retval, err +} + +// The create API allows you to create an indices through an API. 
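+//
+// A sketch (the settings map is illustrative; any struct or map that
+// marshals to valid index-settings JSON should work):
+//
+//	settings := map[string]interface{}{
+//		"settings": map[string]interface{}{
+//			"index": map[string]interface{}{"number_of_shards": 1},
+//		},
+//	}
+//	_, err := c.CreateIndexWithSettings("twitter", settings)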
+func (c *Conn) CreateIndexWithSettings(index string, settings interface{}) (BaseResponse, error) { + var url string + var retval BaseResponse + + settingsType := reflect.TypeOf(settings).Kind() + if settingsType != reflect.Struct && settingsType != reflect.Map { + return retval, fmt.Errorf("Settings kind was not struct or map") + } + + requestBody, err := json.Marshal(settings) + + if err != nil { + return retval, err + } + + if len(index) > 0 { + url = fmt.Sprintf("/%s", index) + } else { + return retval, fmt.Errorf("You must specify an index to create") + } + + body, err := c.DoCommand("PUT", url, nil, requestBody) + if err != nil { + return retval, err + } + + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeleteindex.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeleteindex.go new file mode 100644 index 000000000..082ff07d4 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeleteindex.go @@ -0,0 +1,42 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +// The delete API allows you to delete one or more indices through an API. This operation may fail +// if the elasitsearch configuration has been set to forbid deleting indexes. +func (c *Conn) DeleteIndex(index string) (BaseResponse, error) { + var url string + var retval BaseResponse + + if len(index) > 0 { + url = fmt.Sprintf("/%s", index) + } else { + return retval, fmt.Errorf("You must specify at least one index to delete") + } + + body, err := c.DoCommand("DELETE", url, nil, nil) + if err != nil { + return retval, err + } + + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping.go new file mode 100644 index 000000000..f0ac94d21 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping.go @@ -0,0 +1,45 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +// The delete API allows you to delete a mapping through an API. 
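+//
+// A usage sketch (index and mapping names illustrative):
+//
+//	_, err := c.DeleteMapping("twitter", "tweet")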
+func (c *Conn) DeleteMapping(index string, typeName string) (BaseResponse, error) { + var retval BaseResponse + + if len(index) == 0 { + return retval, fmt.Errorf("You must specify at least one index to delete a mapping from") + } + + if len(typeName) == 0 { + return retval, fmt.Errorf("You must specify at least one mapping to delete") + } + + // As documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-mapping.html + url := fmt.Sprintf("/%s/%s", index, typeName) + + body, err := c.DoCommand("DELETE", url, nil, nil) + if err != nil { + return retval, err + } + + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + + return retval, err +} \ No newline at end of file diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go new file mode 100644 index 000000000..5f6575f9c --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go @@ -0,0 +1,54 @@ +package elastigo + +import ( + "testing" + "net/http/httptest" + "net/http" + "net/url" + "strings" +) + +func TestDeleteMapping(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "DELETE" { + t.Errorf("Expected HTTP Verb, DELETE") + } + + if r.URL.Path == "/this/exists" { + w.Write([]byte(`{"acknowledged": true}`)) + } else if r.URL.Path == "/this/not_exists" { + w.WriteHeader(http.StatusNotFound) + w.Write([]byte(`{"error": "TypeMissingException[[_all] type[[not_exists]] missing: No index has the type.]","status": 404}`)) + } else { + t.Errorf("Unexpected request path, %s", r.URL.Path) + } + })) + defer ts.Close() + + serverURL, _ := url.Parse(ts.URL) + + c := NewTestConn() + + c.Domain = strings.Split(serverURL.Host, ":")[0] + c.Port = strings.Split(serverURL.Host, ":")[1] + + _, err := c.DeleteMapping("this","exists") + if err != nil { + t.Errorf("Expected no error and got, %s", err) + } + + _, err = c.DeleteMapping("this", "not_exists") + if err == nil { + t.Errorf("Expected error and got none deleting /this/not_exists") + } + + _, err = c.DeleteMapping("", "two") + if err == nil { + t.Errorf("Expected error for no index and got none") + } + + _, err = c.DeleteMapping("one", "") + if err == nil { + t.Errorf("Expected error for no mapping and got none") + } +} \ No newline at end of file diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdoc.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdoc.go new file mode 100644 index 000000000..e8227651a --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdoc.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesflush.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesflush.go new file mode 100644 index 000000000..9fe7bc99c --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesflush.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Flush flushes one or more indices through an API. The flush process of an index basically +// frees memory from the index by flushing data to the index storage and clearing the internal transaction +// log. By default, ElasticSearch uses memory heuristics in order to automatically trigger flush operations +// as required in order to clear memory. +// http://www.elasticsearch.org/guide/reference/api/admin-indices-flush.html +// TODO: add Shards to response +func (c *Conn) Flush(indices ...string) (BaseResponse, error) { + var url string + var retval BaseResponse + if len(indices) > 0 { + url = fmt.Sprintf("/%s/_flush", strings.Join(indices, ",")) + } else { + url = "/_flush" + } + body, err := c.DoCommand("POST", url, nil, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesgetsettings.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesgetsettings.go new file mode 100644 index 000000000..e8227651a --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesgetsettings.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesindicesexists.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesindicesexists.go new file mode 100644 index 000000000..b1a7ea30c --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesindicesexists.go @@ -0,0 +1,37 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"fmt"
+	"strings"
+)
+
+// IndicesExists checks whether the given indices exist. It returns (false, nil) when
+// DoCommand reports RecordNotFound, (true, nil) when the indices exist, and (true, err)
+// for any other error, erring on the side of existence when the check itself fails.
+// see http://www.elasticsearch.org/guide/reference/api/admin-indices-indices-exists/
+func (c *Conn) IndicesExists(indices ...string) (bool, error) {
+	var url string
+	if len(indices) > 0 {
+		url = fmt.Sprintf("/%s", strings.Join(indices, ","))
+	}
+	_, err := c.DoCommand("HEAD", url, nil, nil)
+	if err != nil {
+		if err == RecordNotFound {
+			return false, nil
+		} else {
+			return true, err
+		}
+	}
+	return true, nil
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesopencloseindex.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesopencloseindex.go
new file mode 100644
index 000000000..256d776a9
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesopencloseindex.go
@@ -0,0 +1,54 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
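// --- Editor's sketch (illustration only; not part of the vendored patch) ---
// IndicesExists above maps the RecordNotFound sentinel to (false, nil), so a
// caller can branch three ways; the "books" index name is hypothetical.
func indicesExistsSketch(c *Conn) (string, error) {
	exists, err := c.IndicesExists("books")
	switch {
	case err != nil:
		return "", err // transport or server error; existence unknown
	case exists:
		return "present", nil
	default:
		return "missing", nil // HEAD /books returned 404
	}
}
// ---------------------------------------------------------------------------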
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func (c *Conn) OpenIndices() (BaseResponse, error) {
+	return c.openCloseOperation("_all", "_open")
+}
+
+func (c *Conn) CloseIndices() (BaseResponse, error) {
+	return c.openCloseOperation("_all", "_close")
+}
+
+func (c *Conn) OpenIndex(index string) (BaseResponse, error) {
+	return c.openCloseOperation(index, "_open")
+}
+
+func (c *Conn) CloseIndex(index string) (BaseResponse, error) {
+	return c.openCloseOperation(index, "_close")
+}
+
+func (c *Conn) openCloseOperation(index, mode string) (BaseResponse, error) {
+	var url string
+	var retval BaseResponse
+
+	if len(index) > 0 {
+		url = fmt.Sprintf("/%s/%s", index, mode)
+	} else {
+		url = fmt.Sprintf("/%s", mode)
+	}
+
+	body, errDo := c.DoCommand("POST", url, nil, nil)
+	if errDo != nil {
+		return retval, errDo
+	}
+	jsonErr := json.Unmarshal(body, &retval)
+	if jsonErr != nil {
+		return retval, jsonErr
+	}
+	return retval, errDo
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesoptimize.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesoptimize.go
new file mode 100644
index 000000000..559bea0eb
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesoptimize.go
@@ -0,0 +1,41 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// OptimizeIndices optimizes one or more indices, merging index segments and reclaiming
+// the space used by deleted documents. If no indices are given, all indices are optimized.
+// http://www.elasticsearch.org/guide/reference/api/admin-indices-optimize/
+func (c *Conn) OptimizeIndices(args map[string]interface{}, indices ...string) (ExtendedStatus, error) {
+	var retval ExtendedStatus
+	var optimizeUrl string = "/_optimize"
+	if len(indices) > 0 {
+		optimizeUrl = fmt.Sprintf("/%s%s", strings.Join(indices, ","), optimizeUrl)
+	}
+
+	body, err := c.DoCommand("POST", optimizeUrl, args, nil)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		// marshall into json
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+	return retval, err
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping.go
new file mode 100644
index 000000000..aab0d228c
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping.go
@@ -0,0 +1,171 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
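// --- Editor's sketch (illustration only; not part of the vendored patch) ---
// Close, reopen, and optimize a hypothetical index using the helpers above.
// max_num_segments is a standard Elasticsearch query parameter; passing it
// through the args map is shown here as an assumption about how DoCommand
// forwards query arguments.
func maintenanceSketch(c *Conn) error {
	if _, err := c.CloseIndex("books"); err != nil { // POST /books/_close
		return err
	}
	if _, err := c.OpenIndex("books"); err != nil { // POST /books/_open
		return err
	}
	args := map[string]interface{}{"max_num_segments": 1}
	_, err := c.OptimizeIndices(args, "books") // POST /books/_optimize
	return err
}
// ---------------------------------------------------------------------------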
+// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +type Mapping map[string]MappingOptions + +type MappingOptions struct { + Id IdOptions `json:"_id"` + Timestamp TimestampOptions `json:"_timestamp"` + Analyzer *AnalyzerOptions `json:"_analyzer,omitempty"` + Parent *ParentOptions `json:"_parent,omitempty"` + Routing *RoutingOptions `json:"_routing,omitempty"` + Size *SizeOptions `json:"_size,omitempty"` + Source *SourceOptions `json:"_source,omitempty"` + TTL *TTLOptions `json:"_ttl,omitempty"` + Type *TypeOptions `json:"_type,omitempty"` + Properties map[string]interface{} `json:"properties"` +} + +type TimestampOptions struct { + Enabled bool `json:"enabled"` +} + +type AnalyzerOptions struct { + Path string `json:"path,omitempty"` + Index string `json:"index,omitempty"` +} + +type ParentOptions struct { + Type string `json:"type"` +} + +type RoutingOptions struct { + Required bool `json:"required,omitempty"` + Path string `json:"path,omitempty"` +} + +type SizeOptions struct { + Enabled bool `json:"enabled,omitempty"` + Store bool `json:"store,omitempty"` +} + +type SourceOptions struct { + Enabled bool `json:"enabled,omitempty"` + Includes []string `json:"includes,omitempty"` + Excludes []string `json:"excludes,omitempty"` +} + +type TypeOptions struct { + Store bool `json:"store,omitempty"` + Index string `json:"index,omitempty"` +} + +type TTLOptions struct { + Enabled bool `json:"enabled"` + Default string `json:"default,omitempty"` +} + +type IdOptions struct { + Index string `json:"index,omitempty"` + Path string `json:"path,omitempty"` +} + +func (m_ Mapping) Options() MappingOptions { + m := map[string]MappingOptions(m_) + for _, v := range m { + return v + } + panic(fmt.Errorf("Malformed input: %v", m_)) +} + +func MappingForType(typeName string, opts MappingOptions) Mapping { + return map[string]MappingOptions{typeName: opts} +} + +func (c *Conn) PutMapping(index string, typeName string, instance interface{}, opt MappingOptions) error { + instanceType := reflect.TypeOf(instance) + if instanceType.Kind() != reflect.Struct { + return fmt.Errorf("instance kind was not struct") + } + + if opt.Properties == nil { + opt.Properties = make(map[string]interface{}) + } + getProperties(instanceType, opt.Properties) + body, err := json.Marshal(MappingForType(typeName, opt)) + if err != nil { + return err + } + _, err = c.DoCommand("PUT", fmt.Sprintf("/%s/%s/_mapping", index, typeName), nil, string(body)) + if err != nil { + return err + } + + return nil +} + +//Same as PutMapping, but takes a []byte for mapping and provides no check of structure +func (c *Conn) PutMappingFromJSON(index string, typeName string, mapping []byte) error { + _, err := c.DoCommand("PUT", fmt.Sprintf("/%s/%s/_mapping", index, typeName), nil, string(mapping)) + return err +} + +func getProperties(t reflect.Type, prop map[string]interface{}) { + n := t.NumField() + for i := 0; i < n; i++ { + field := t.Field(i) + + name := strings.Split(field.Tag.Get("json"), ",")[0] + if name == "-" { + continue + } else if name == "" { + name = field.Name + } + + attrMap := make(map[string]interface{}) + attrs := splitTag(field.Tag.Get("elastic")) + for _, attr := range attrs { + keyvalue := strings.Split(attr, ":") + attrMap[keyvalue[0]] = keyvalue[1] + } + + if len(attrMap) == 0 || attrMap["type"] == "nested" { + + // We are looking for tags on any inner struct, independently of + // 
whether the field is a struct, a pointer to struct, or a slice of structs + targetType := field.Type + if targetType.Kind() == reflect.Ptr || + targetType.Kind() == reflect.Slice { + targetType = field.Type.Elem() + } + if targetType.Kind() == reflect.Struct { + if field.Anonymous { + getProperties(targetType, prop) + } else { + innerStructProp := make(map[string]interface{}) + getProperties(targetType, innerStructProp) + attrMap["properties"] = innerStructProp + } + } + } + if len(attrMap) != 0 { + prop[name] = attrMap + } + } +} + +func splitTag(tag string) []string { + tag = strings.Trim(tag, " ") + if tag == "" { + return []string{} + } else { + return strings.Split(tag, ",") + } +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go new file mode 100644 index 000000000..8da3ca346 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go @@ -0,0 +1,302 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "sort" + "strings" + "testing" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setup(t *testing.T) *Conn { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + c := NewTestConn() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("Error: %v", err) + } + + c.Domain = strings.Split(serverURL.Host, ":")[0] + c.Port = strings.Split(serverURL.Host, ":")[1] + + return c +} + +func teardown() { + server.Close() +} + +type TestStruct struct { + Id string `json:"id" elastic:"index:not_analyzed"` + DontIndex string `json:"dontIndex" elastic:"index:no"` + Number int `json:"number" elastic:"type:integer,index:analyzed"` + Omitted string `json:"-"` + NoJson string `elastic:"type:string"` + unexported string + JsonOmitEmpty string `json:"jsonOmitEmpty,omitempty" elastic:"type:string"` + Embedded + Inner InnerStruct `json:"inner"` + InnerP *InnerStruct `json:"pointer_to_inner"` + InnerS []InnerStruct `json:"slice_of_inner"` + MultiAnalyze string `json:"multi_analyze"` + NestedObject NestedStruct `json:"nestedObject" elastic:"type:nested"` +} + +type Embedded struct { + EmbeddedField string `json:"embeddedField" elastic:"type:string"` +} + +type InnerStruct struct { + InnerField string `json:"innerField" elastic:"type:date"` +} + +type NestedStruct struct { + InnerField string `json:"innerField" elastic:"type:date"` +} + +// Sorting string +// RuneSlice implements sort.Interface (http://golang.org/pkg/sort/#Interface) +type RuneSlice []rune + +func (p RuneSlice) Len() int { return len(p) } +func (p RuneSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p RuneSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// sorted func returns string with sorted characters +func sorted(s string) string { + runes 
:= []rune(s) + sort.Sort(RuneSlice(runes)) + return string(runes) +} + +func TestPutMapping(t *testing.T) { + c := setup(t) + defer teardown() + + options := MappingOptions{ + Timestamp: TimestampOptions{Enabled: true}, + Id: IdOptions{Index: "analyzed", Path: "id"}, + Parent: &ParentOptions{Type: "testParent"}, + TTL: &TTLOptions{Enabled: true, Default: "1w"}, + Properties: map[string]interface{}{ + // special properties that can't be expressed as tags + "multi_analyze": map[string]interface{}{ + "type": "multi_field", + "fields": map[string]map[string]string{ + "ma_analyzed": {"type": "string", "index": "analyzed"}, + "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, + }, + }, + }, + } + expValue := MappingForType("myType", MappingOptions{ + Timestamp: TimestampOptions{Enabled: true}, + Id: IdOptions{Index: "analyzed", Path: "id"}, + Parent: &ParentOptions{Type: "testParent"}, + TTL: &TTLOptions{Enabled: true, Default: "1w"}, + Properties: map[string]interface{}{ + "NoJson": map[string]string{"type": "string"}, + "dontIndex": map[string]string{"index": "no"}, + "embeddedField": map[string]string{"type": "string"}, + "id": map[string]string{"index": "not_analyzed"}, + "jsonOmitEmpty": map[string]string{"type": "string"}, + "number": map[string]string{"index": "analyzed", "type": "integer"}, + "multi_analyze": map[string]interface{}{ + "type": "multi_field", + "fields": map[string]map[string]string{ + "ma_analyzed": {"type": "string", "index": "analyzed"}, + "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, + }, + }, + "inner": map[string]map[string]map[string]string{ + "properties": { + "innerField": {"type": "date"}, + }, + }, + "pointer_to_inner": map[string]map[string]map[string]string{ + "properties": { + "innerField": {"type": "date"}, + }, + }, + "slice_of_inner": map[string]map[string]map[string]string{ + "properties": { + "innerField": {"type": "date"}, + }, + }, + "nestedObject": map[string]interface{}{ + "type": "nested", + "properties": map[string]map[string]string{ + "innerField": {"type": "date"}, + }, + }, + }, + }) + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + var value map[string]interface{} + bd, err := ioutil.ReadAll(r.Body) + json.NewDecoder(strings.NewReader(string(bd))).Decode(&value) + expValJson, err := json.MarshalIndent(expValue, "", " ") + if err != nil { + t.Errorf("Got error: %v", err) + } + valJson, err := json.MarshalIndent(value, "", " ") + if err != nil { + t.Errorf("Got error: %v", err) + } + + if sorted(string(expValJson)) != sorted(string(valJson)) { + t.Errorf("Expected %s but got %s", string(expValJson), string(valJson)) + } + }) + + err := c.PutMapping("myIndex", "myType", TestStruct{}, options) + if err != nil { + t.Errorf("Error: %v", err) + } +} + +func TestPutMappingFromJSON(t *testing.T) { + c := setup(t) + defer teardown() + /* + options := MappingOptions{ + Timestamp: TimestampOptions{Enabled: true}, + Id: IdOptions{Index: "analyzed", Path: "id"}, + Parent: &ParentOptions{Type: "testParent"}, + Properties: map[string]interface{}{ + // special properties that can't be expressed as tags + "multi_analyze": map[string]interface{}{ + "type": "multi_field", + "fields": map[string]map[string]string{ + "ma_analyzed": {"type": "string", "index": "analyzed"}, + "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, + }, + }, + }, + } + */ + + options := `{ + "myType": { + "_id": { + "index": "analyzed", + "path": "id" + }, + "_timestamp": { + "enabled": true + }, + "_parent": { + "type": 
"testParent" + }, + "properties": { + "analyzed_string": { + "type": "string", + "index": "analyzed" + }, + "multi_analyze": { + "type": "multi_field", + "fields": { + "ma_analyzed": { + "type": "string", + "index": "analyzed" + }, + "ma_notanalyzed": { + "type": "string", + "index": "not_analyzed" + } + } + } + } + } + }` + + expValue := map[string]interface{}{ + "myType": map[string]interface{}{ + "_timestamp": map[string]interface{}{ + "enabled": true, + }, + "_id": map[string]interface{}{ + "index": "analyzed", + "path": "id", + }, + "_parent": map[string]interface{}{ + "type": "testParent", + }, + "properties": map[string]interface{}{ + "analyzed_string": map[string]string{ + "type": "string", + "index": "analyzed", + }, + "multi_analyze": map[string]interface{}{ + "type": "multi_field", + "fields": map[string]map[string]string{ + "ma_analyzed": {"type": "string", "index": "analyzed"}, + "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, + }, + }, + }, + }, + } + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + var value map[string]interface{} + bd, err := ioutil.ReadAll(r.Body) + err = json.Unmarshal(bd, &value) + if err != nil { + t.Errorf("Got error: %v", err) + } + expValJson, err := json.MarshalIndent(expValue, "", " ") + if err != nil { + t.Errorf("Got error: %v", err) + } + + valJson, err := json.MarshalIndent(value, "", " ") + if err != nil { + t.Errorf("Got error: %v", err) + } + + if sorted(string(expValJson)) != sorted(string(valJson)) { + t.Errorf("Expected %s but got %s", string(expValJson), string(valJson)) + } + }) + + err := c.PutMappingFromJSON("myIndex", "myType", []byte(options)) + if err != nil { + t.Errorf("Error: %v", err) + } +} + +type StructWithEmptyElasticTag struct { + Field string `json:"field" elastic:""` +} + +func TestPutMapping_empty_elastic_tag_is_accepted(t *testing.T) { + properties := map[string]interface{}{} + getProperties(reflect.TypeOf(StructWithEmptyElasticTag{}), properties) + if len(properties) != 0 { + t.Errorf("Expected empty properites but got: %v", properties) + } +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputsettings.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputsettings.go new file mode 100644 index 000000000..a21cc698c --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputsettings.go @@ -0,0 +1,42 @@ +package elastigo + +import ( + "encoding/json" + "fmt" + "reflect" +) + +func (c *Conn) PutSettings(index string, settings interface{}) (BaseResponse, error) { + + var url string + var retval BaseResponse + + settingsType := reflect.TypeOf(settings) + if settingsType.Kind() != reflect.Struct { + return retval, fmt.Errorf("Settings kind was not struct") + } + + if len(index) > 0 { + url = fmt.Sprintf("/%s/_settings", index) + } else { + url = fmt.Sprintf("/_settings") + } + + requestBody, err := json.Marshal(settings) + + if err != nil { + return retval, err + } + + body, errDo := c.DoCommand("PUT", url, nil, requestBody) + if errDo != nil { + return retval, errDo + } + + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesrefresh.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesrefresh.go new file mode 100644 index 000000000..26534484a --- /dev/null +++ 
b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesrefresh.go @@ -0,0 +1,45 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Refresh explicitly refreshes one or more index, making all operations performed since +// the last refresh available for search. The (near) real-time capabilities depend on the index engine used. +// For example, the internal one requires refresh to be called, but by default a refresh is scheduled periodically. +// http://www.elasticsearch.org/guide/reference/api/admin-indices-refresh.html +// TODO: add Shards to response +func (c *Conn) Refresh(indices ...string) (BaseResponse, error) { + var url string + var retval BaseResponse + if len(indices) > 0 { + url = fmt.Sprintf("/%s/_refresh", strings.Join(indices, ",")) + } else { + url = "/_refresh" + } + body, err := c.DoCommand("POST", url, nil, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessegments.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessegments.go new file mode 100644 index 000000000..e8227651a --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessegments.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessnapshot.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessnapshot.go new file mode 100644 index 000000000..3fea027b7 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessnapshot.go @@ -0,0 +1,44 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
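// --- Editor's sketch (illustration only; not part of the vendored patch) ---
// A sketch of the PutMapping API vendored earlier in this patch
// (indicesputmapping.go): `elastic` struct tags become mapping properties.
// The tweetSketch type and all field names are invented for illustration.
type tweetSketch struct {
	User     string `json:"user" elastic:"index:not_analyzed"`
	Message  string `json:"message" elastic:"type:string,index:analyzed"`
	Retweets int    `json:"retweets" elastic:"type:integer"`
}

func putMappingSketch(c *Conn) error {
	opts := MappingOptions{Timestamp: TimestampOptions{Enabled: true}}
	// PUT /twitter/tweet/_mapping, with properties derived from the tags above.
	return c.PutMapping("twitter", "tweet", tweetSketch{}, opts)
}
// ---------------------------------------------------------------------------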
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Snapshot explicitly snapshots (backs up) one or more indices through the gateway.
+// By default, each index gateway periodically snapshots changes, though this can be disabled
+// and controlled completely through this API.
+// see http://www.elasticsearch.org/guide/reference/api/admin-indices-gateway-snapshot/
+func (c *Conn) Snapshot(indices ...string) (ExtendedStatus, error) {
+	var retval ExtendedStatus
+	var url string
+	if len(indices) > 0 {
+		url = fmt.Sprintf("/%s/_gateway/snapshot", strings.Join(indices, ","))
+
+	} else {
+		url = fmt.Sprintf("/_gateway/snapshot")
+	}
+	body, err := c.DoCommand("POST", url, nil, nil)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		// marshall into json
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+	return retval, err
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstats.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstats.go
new file mode 100644
index 000000000..e8227651a
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstats.go
@@ -0,0 +1,12 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstatus.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstatus.go
new file mode 100644
index 000000000..38981a7c9
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstatus.go
@@ -0,0 +1,43 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Status lists status details of all indices or the specified index.
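// --- Editor's sketch (illustration only; not part of the vendored patch) ---
// Refresh (previous file) then Snapshot (above) a hypothetical index: make
// recent writes searchable, then ask the gateway to back the index up.
func refreshAndSnapshotSketch(c *Conn) error {
	if _, err := c.Refresh("books"); err != nil { // POST /books/_refresh
		return err
	}
	_, err := c.Snapshot("books") // POST /books/_gateway/snapshot
	return err
}
// ---------------------------------------------------------------------------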
+// http://www.elasticsearch.org/guide/reference/api/admin-indices-status.html +func (c *Conn) Status(args map[string]interface{}, indices ...string) (BaseResponse, error) { + var retval BaseResponse + var url string + if len(indices) > 0 { + url = fmt.Sprintf("/%s/_status", strings.Join(indices, ",")) + + } else { + url = "/_status" + } + body, err := c.DoCommand("GET", url, args, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicestemplates.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicestemplates.go new file mode 100644 index 000000000..e8227651a --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicestemplates.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesupdatesettings.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesupdatesettings.go new file mode 100644 index 000000000..e8227651a --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesupdatesettings.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request.go new file mode 100644 index 000000000..4f72955eb --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request.go @@ -0,0 +1,126 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
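// --- Editor's sketch (illustration only; not part of the vendored patch) ---
// Status above forwards the args map as query parameters; "recovery" is one
// of the flags the old _status API accepted, shown here as an assumption.
func statusSketch(c *Conn) (BaseResponse, error) {
	args := map[string]interface{}{"recovery": true}
	return c.Status(args, "books") // GET /books/_status?recovery=true
}
// ---------------------------------------------------------------------------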
+
+package elastigo
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+
+	hostpool "github.com/bitly/go-hostpool"
+)
+
+type Request struct {
+	*http.Client
+	*http.Request
+	hostResponse hostpool.HostPoolResponse
+}
+
+func (r *Request) SetBodyJson(data interface{}) error {
+	body, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+	r.SetBodyBytes(body)
+	r.Header.Set("Content-Type", "application/json")
+	return nil
+}
+
+func (r *Request) SetBodyString(body string) {
+	r.SetBody(strings.NewReader(body))
+}
+
+func (r *Request) SetBodyBytes(body []byte) {
+	r.SetBody(bytes.NewReader(body))
+}
+
+func (r *Request) SetBody(body io.Reader) {
+	rc, ok := body.(io.ReadCloser)
+	if !ok && body != nil {
+		rc = ioutil.NopCloser(body)
+	}
+	r.Body = rc
+	r.ContentLength = -1
+}
+
+func (r *Request) Do(v interface{}) (int, []byte, error) {
+	response, bodyBytes, err := r.DoResponse(v)
+	if err != nil {
+		return -1, nil, err
+	}
+	return response.StatusCode, bodyBytes, err
+}
+
+func (r *Request) DoResponse(v interface{}) (*http.Response, []byte, error) {
+	var client = r.Client
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	res, err := client.Do(r.Request)
+	// Inform the HostPool of what happened to the request and allow it to update
+	r.hostResponse.Mark(err)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer res.Body.Close()
+	bodyBytes, err := ioutil.ReadAll(res.Body)
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if res.StatusCode == 404 {
+		return nil, bodyBytes, RecordNotFound
+	}
+
+	if res.StatusCode > 304 && v != nil {
+		jsonErr := json.Unmarshal(bodyBytes, v)
+		if jsonErr != nil {
+			return nil, nil, fmt.Errorf("Json response unmarshal error: [%s], response content: [%s]", jsonErr.Error(), string(bodyBytes))
+		}
+	}
+	return res, bodyBytes, err
+}
+
+func Escape(args map[string]interface{}) (s string, err error) {
+	vals := url.Values{}
+	for key, val := range args {
+		switch v := val.(type) {
+		case string:
+			vals.Add(key, v)
+		case bool:
+			vals.Add(key, strconv.FormatBool(v))
+		case int, int32, int64:
+			vInt := reflect.ValueOf(v).Int()
+			vals.Add(key, strconv.FormatInt(vInt, 10))
+		case float32, float64:
+			vFloat := reflect.ValueOf(v).Float()
+			vals.Add(key, strconv.FormatFloat(vFloat, 'f', -1, 32))
+		case []string:
+			vals.Add(key, strings.Join(v, ","))
+		default:
+			err = fmt.Errorf("Could not format URL argument: %s", key)
+			return
+		}
+	}
+	s = vals.Encode()
+	return
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request_test.go
new file mode 100644
index 000000000..fa173d6ec
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request_test.go
@@ -0,0 +1,74 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
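// --- Editor's sketch (illustration only; not part of the vendored patch) ---
// Escape above builds a deterministic query string via url.Values, sorting
// keys alphabetically and comma-joining []string values; all keys here are
// hypothetical.
func escapeSketch() (string, error) {
	return Escape(map[string]interface{}{
		"q":      "user:kimchy",
		"pretty": true,
		"fields": []string{"title", "body"},
	})
	// => "fields=title%2Cbody&pretty=true&q=user%3Akimchy"
}
// ---------------------------------------------------------------------------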
+ +package elastigo + +import ( + "fmt" + "testing" + + "github.com/bmizerany/assert" +) + +func TestQueryString(t *testing.T) { + // Test nil argument + s, err := Escape(nil) + assert.T(t, s == "" && err == nil, fmt.Sprintf("Nil should not fail and yield empty string")) + + // Test single string argument + s, err = Escape(map[string]interface{}{"foo": "bar"}) + exp := "foo=bar" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single int argument + s, err = Escape(map[string]interface{}{"foo": int(1)}) + exp = "foo=1" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single int64 argument + s, err = Escape(map[string]interface{}{"foo": int64(1)}) + exp = "foo=1" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single int32 argument + s, err = Escape(map[string]interface{}{"foo": int32(1)}) + exp = "foo=1" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single float64 argument + s, err = Escape(map[string]interface{}{"foo": float64(3.141592)}) + exp = "foo=3.141592" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single float32 argument + s, err = Escape(map[string]interface{}{"foo": float32(3.141592)}) + exp = "foo=3.141592" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single []string argument + s, err = Escape(map[string]interface{}{"foo": []string{"bar", "baz"}}) + exp = "foo=bar%2Cbaz" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test combination of all arguments + s, err = Escape(map[string]interface{}{ + "foo": "bar", + "bar": 1, + "baz": 3.141592, + "test": []string{"a", "b"}, + }) + // url.Values also orders arguments alphabetically. 
+ exp = "bar=1&baz=3.141592&foo=bar&test=a%2Cb" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test invalid datatype + s, err = Escape(map[string]interface{}{"foo": []int{}}) + assert.T(t, err != nil, fmt.Sprintf("Expected err to not be nil")) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate.go new file mode 100644 index 000000000..b7412ff46 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate.go @@ -0,0 +1,226 @@ +package elastigo + +import "encoding/json" + +func Aggregate(name string) *AggregateDsl { + return &AggregateDsl{Name: name} +} + +type AggregateDsl struct { + Name string + TypeName string + Type interface{} + Filters *FilterWrap `json:"filters,omitempty"` + AggregatesVal map[string]*AggregateDsl `json:"aggregations,omitempty"` +} + +type FieldAggregate struct { + Field string `json:"field"` + Size *int `json:"size,omitempty"` +} + +/** + * Aggregates accepts n "sub-aggregates" to be applied to this aggregate + * + * agg := Aggregate("user").Term("user_id") + * agg.Aggregates( + * Aggregate("total_spent").Sum("price"), + * Aggregate("total_saved").Sum("discount"), + * ) + */ +func (d *AggregateDsl) Aggregates(aggs ...*AggregateDsl) *AggregateDsl { + if len(aggs) < 1 { + return d + } + if len(d.AggregatesVal) == 0 { + d.AggregatesVal = make(map[string]*AggregateDsl) + } + + for _, agg := range aggs { + d.AggregatesVal[agg.Name] = agg + } + return d +} + +func (d *AggregateDsl) Min(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "min" + return d +} + +func (d *AggregateDsl) Max(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "max" + return d +} + +func (d *AggregateDsl) Sum(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "sum" + return d +} + +func (d *AggregateDsl) Avg(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "avg" + return d +} + +func (d *AggregateDsl) Stats(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "stats" + return d +} + +func (d *AggregateDsl) ExtendedStats(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "extended_stats" + return d +} + +func (d *AggregateDsl) ValueCount(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "value_count" + return d +} + +func (d *AggregateDsl) Percentiles(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "percentiles" + return d +} + +type Cardinality struct { + Field string `json:"field"` + PrecisionThreshold float64 `json:"precision_threshold,omitempty"` + Rehash bool `json:"rehash,omitempty"` +} + +/** + * Cardinality( + * "field_name", + * true, + * 0, + * ) + */ +func (d *AggregateDsl) Cardinality(field string, rehash bool, threshold int) *AggregateDsl { + c := Cardinality{Field: field} + + // Only set if it's false, since the default is true + if !rehash { + c.Rehash = false + } + + if threshold > 0 { + c.PrecisionThreshold = float64(threshold) + } + d.Type = c + d.TypeName = "cardinality" + return d +} + +func (d *AggregateDsl) Global() *AggregateDsl { + d.Type = struct{}{} + d.TypeName = "global" + return d +} + +func (d *AggregateDsl) Filter(filters ...interface{}) *AggregateDsl { + + if len(filters) == 0 { + return d 
+ } + + if d.Filters == nil { + d.Filters = NewFilterWrap() + } + + d.Filters.addFilters(filters) + return d +} + +func (d *AggregateDsl) Missing(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "missing" + return d +} + +func (d *AggregateDsl) Terms(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "terms" + return d +} + +func (d *AggregateDsl) TermsWithSize(field string, size int) *AggregateDsl { + d.Type = FieldAggregate{Field: field, Size: &size} + d.TypeName = "terms" + return d +} + +func (d *AggregateDsl) SignificantTerms(field string) *AggregateDsl { + d.Type = FieldAggregate{Field: field} + d.TypeName = "significant_terms" + return d +} + +type Histogram struct { + Field string `json:"field"` + Interval float64 `json:"interval"` +} + +func (d *AggregateDsl) Histogram(field string, interval int) *AggregateDsl { + d.Type = Histogram{ + Field: field, + Interval: float64(interval), + } + d.TypeName = "histogram" + return d +} + +type DateHistogram struct { + Field string `json:"field"` + Interval string `json:"interval"` +} + +func (d *AggregateDsl) DateHistogram(field, interval string) *AggregateDsl { + d.Type = DateHistogram{ + Field: field, + Interval: interval, + } + d.TypeName = "date_histogram" + return d +} + +func (d *AggregateDsl) MarshalJSON() ([]byte, error) { + return json.Marshal(d.toMap()) +} + +func (d *AggregateDsl) toMap() map[string]interface{} { + root := map[string]interface{}{} + + if d.Type != nil { + root[d.TypeName] = d.Type + } + aggregates := d.aggregatesMap() + + if d.Filters != nil { + root["filter"] = d.Filters + } + + if len(aggregates) > 0 { + root["aggregations"] = aggregates + } + return root + +} + +func (d *AggregateDsl) aggregatesMap() map[string]interface{} { + root := map[string]interface{}{} + + if len(d.AggregatesVal) > 0 { + for _, agg := range d.AggregatesVal { + root[agg.Name] = agg.toMap() + } + } + return root +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate_test.go new file mode 100644 index 000000000..331809ce6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate_test.go @@ -0,0 +1,177 @@ +package elastigo + +import ( + "encoding/json" + "reflect" + "testing" +) + +// Test all aggregate types and nested aggregations +func TestAggregateDsl(t *testing.T) { + + min := Aggregate("min_price").Min("price") + max := Aggregate("max_price").Max("price") + sum := Aggregate("sum_price").Sum("price") + avg := Aggregate("avg_price").Avg("price") + stats := Aggregate("stats_price").Stats("price") + extendedStats := Aggregate("extended_stats_price").ExtendedStats("price") + valueCount := Aggregate("value_count_price").ValueCount("price") + percentiles := Aggregate("percentiles_price").Percentiles("price") + cardinality := Aggregate("cardinality_price").Cardinality("price", true, 50) + global := Aggregate("global").Global() + missing := Aggregate("missing_price").Missing("price") + terms := Aggregate("terms_price").Terms("price") + termsSize := Aggregate("terms_price_size").TermsWithSize("price", 0) + significantTerms := Aggregate("significant_terms_price").SignificantTerms("price") + histogram := Aggregate("histogram_price").Histogram("price", 50) + + dateAgg := Aggregate("articles_over_time").DateHistogram("date", "month") + dateAgg.Aggregates( + min, + max, + sum, + avg, + stats, + extendedStats, + 
valueCount, + percentiles, + cardinality, + global, + missing, + terms, + termsSize, + significantTerms, + histogram, + ) + + qry := Search("github").Aggregates(dateAgg) + + marshaled, err := json.MarshalIndent(qry.AggregatesVal, "", " ") + if err != nil { + t.Errorf("Failed to marshal AggregatesVal: %s", err.Error()) + return + } + + assertJsonMatch( + t, + marshaled, + []byte(` + { + "articles_over_time": { + "date_histogram" : { + "field" : "date", + "interval" : "month" + }, + "aggregations": { + "min_price":{ + "min": { "field": "price" } + }, + "max_price":{ + "max": { "field": "price" } + }, + "sum_price":{ + "sum": { "field": "price" } + }, + "avg_price": { + "avg": { "field": "price" } + }, + "stats_price":{ + "stats": { "field": "price" } + }, + "extended_stats_price":{ + "extended_stats": { "field": "price" } + }, + "value_count_price":{ + "value_count": { "field": "price" } + }, + "percentiles_price":{ + "percentiles": { "field": "price" } + }, + "cardinality_price":{ + "cardinality": { "field": "price", "precision_threshold": 50 } + }, + "global":{ + "global": {} + }, + "missing_price":{ + "missing": { "field": "price" } + }, + "terms_price":{ + "terms": { "field": "price" } + }, + "terms_price_size":{ + "terms": { "field": "price", "size": 0 } + }, + "significant_terms_price":{ + "significant_terms": { "field": "price" } + }, + "histogram_price":{ + "histogram": { "field": "price", "interval": 50 } + } + } + } + } + `), + ) + +} + +func TestAggregateFilter(t *testing.T) { + + avg := Aggregate("avg_price").Avg("price") + + dateAgg := Aggregate("in_stock_products").Filter( + Filter().Range("stock", nil, 0, nil, nil, ""), + ) + + dateAgg.Aggregates( + avg, + ) + + qry := Search("github").Aggregates(dateAgg) + + marshaled, err := json.MarshalIndent(qry.AggregatesVal, "", " ") + if err != nil { + t.Errorf("Failed to marshal AggregatesVal: %s", err.Error()) + return + } + + assertJsonMatch( + t, + marshaled, + []byte(` + { + "in_stock_products" : { + "filter" : { + "range" : { "stock" : { "gt" : 0 } } + }, + "aggregations" : { + "avg_price" : { "avg" : { "field" : "price" } } + } + } + } + `), + ) +} + +func assertJsonMatch(t *testing.T, match, expected []byte) { + var m interface{} + var e interface{} + + err := json.Unmarshal(expected, &e) + if err != nil { + t.Errorf("Failed to unmarshal expectation: %s", err.Error()) + return + } + err = json.Unmarshal(match, &m) + if err != nil { + t.Errorf("Failed to unmarshal match: %s", err.Error()) + return + } + + if !reflect.DeepEqual(m, e) { + t.Errorf("Expected %s but got %s", string(expected), string(match)) + return + } + +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchdsl.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchdsl.go new file mode 100644 index 000000000..345a0e9dd --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchdsl.go @@ -0,0 +1,28 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
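// --- Editor's sketch (illustration only; not part of the vendored patch) ---
// A trimmed-down version of the aggregate test above: monthly date-histogram
// buckets with an average price per bucket. Field names are hypothetical;
// encoding/json is assumed imported, and marshalling goes through
// AggregateDsl's custom MarshalJSON.
func aggregateSketch() ([]byte, error) {
	monthly := Aggregate("articles_over_time").DateHistogram("date", "month")
	monthly.Aggregates(Aggregate("avg_price").Avg("price"))
	return json.Marshal(monthly)
}
// ---------------------------------------------------------------------------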
+ +package elastigo + +type SearchRequest struct { + From int `json:"from,omitempty"` + Size int `json:"size,omitempty"` + Query OneTermQuery `json:"query,omitempty"` + + Filter struct { + Term Term `json:"term"` + } `json:"filter,omitempty"` +} + +type Facets struct { + Tag struct { + Terms string `json:"terms"` + } `json:"tag"` +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet.go new file mode 100644 index 000000000..5eec68bce --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet.go @@ -0,0 +1,142 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + + u "github.com/araddon/gou" +) + +var ( + _ = u.DEBUG +) + +/* +"facets": { + "terms": { + "terms": { + "field": [ + "@fields.category" + ], + "size": 25 + } + } +} + + +"facets": { + "actors": { "terms": {"field": ["actor"],"size": "10" }} + , "langauge": { "terms": {"field": ["repository.language"],"size": "10" }} +} + +*/ +func Facet() *FacetDsl { + return &FacetDsl{} +} + +func FacetRange(field string) *RangeDsl { + out := &RangeDsl{&RangeDef{}, nil} + out.RangeDef.Field = field + return out +} + +type FacetDsl struct { + size string + Terms map[string]*Term `json:"terms,omitempty"` + Ranges map[string]*RangeDsl `json:"terms,omitempty"` +} + +type RangeDsl struct { + RangeDef *RangeDef `json:"range,omitempty"` + FilterVal *FilterWrap `json:"facet_filter,omitempty"` +} + +type RangeDef struct { + Field string `json:"field,omitempty"` + Values []*RangeVal `json:"ranges,omitempty"` +} + +type RangeVal struct { + From string `json:"from,omitempty"` + To string `json:"to,omitempty"` +} + +func (m *RangeDsl) Range(from, to string) *RangeDsl { + if len(m.RangeDef.Values) == 0 { + m.RangeDef.Values = make([]*RangeVal, 0) + } + + m.RangeDef.Values = append(m.RangeDef.Values, &RangeVal{From: from, To: to}) + return m +} + +func (s *RangeDsl) Filter(fl ...interface{}) *RangeDsl { + if s.FilterVal == nil { + s.FilterVal = NewFilterWrap() + } + + s.FilterVal.addFilters(fl) + return s +} + +func (m *FacetDsl) Size(size string) *FacetDsl { + m.size = size + return m +} + +func (m *FacetDsl) Fields(fields ...string) *FacetDsl { + if len(fields) < 1 { + return m + } + if len(m.Terms) == 0 { + m.Terms = make(map[string]*Term) + } + m.Terms[fields[0]] = &Term{Terms{Fields: fields}, nil} + return m +} + +func (m *FacetDsl) Regex(field, match string) *FacetDsl { + if len(m.Terms) == 0 { + m.Terms = make(map[string]*Term) + } + m.Terms[field] = &Term{Terms{Fields: []string{field}, Regex: match}, nil} + return m +} + +func (m *FacetDsl) Term(t *Term) *FacetDsl { + if len(m.Terms) == 0 { + m.Terms = make(map[string]*Term) + } + m.Terms[t.Terms.Fields[0]] = t + return m +} + +func (m *FacetDsl) Range(r *RangeDsl) *FacetDsl { + if len(m.Ranges) == 0 { + m.Ranges = make(map[string]*RangeDsl) + } + m.Ranges[r.RangeDef.Field] 
= r + return m +} + +func (m *FacetDsl) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{} + for key, t := range m.Terms { + t.Terms.Size = m.size + data[key] = t + } + for key, r := range m.Ranges { + data[key] = r + } + return json.Marshal(&data) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet_test.go new file mode 100644 index 000000000..11e2664a8 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet_test.go @@ -0,0 +1,42 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "github.com/araddon/gou" + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestFacetRegex(t *testing.T) { + + c := NewTestConn() + PopulateTestDB(t, c) + defer TearDownTestDB(c) + + Convey("Facted regex query", t, func() { + + // This is a possible solution for auto-complete + out, err := Search("oilers").Size("0").Facet( + Facet().Regex("name", "[jk].*").Size("8"), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + // Debug(string(out.Facets)) + fh := gou.NewJsonHelper([]byte(out.Facets)) + facets := fh.Helpers("/name/terms") + So(err, ShouldBeNil) + So(facets, ShouldNotBeNil) + So(len(facets), ShouldEqual, 4) + }) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter.go new file mode 100644 index 000000000..3d10ab57e --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter.go @@ -0,0 +1,402 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
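// --- Editor's sketch (illustration only; not part of the vendored patch) ---
// The facet DSL above, driven through the Search builder used by the regex
// test: a terms facet on a hypothetical "name" field, capped at ten terms.
// Search/Size/Result and the SearchResult type live elsewhere in this library
// and are assumed here.
func facetSketch(c *Conn) (*SearchResult, error) {
	return Search("oilers").Size("0").Facet(
		Facet().Fields("name").Size("10"),
	).Result(c)
}
// ---------------------------------------------------------------------------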
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/araddon/gou"
+)
+
+var (
+	_ = gou.DEBUG
+)
+
+// BoolClause represents a bool (and/or) clause for use with FilterWrap
+// Legacy, use new FilterOp functions instead
+type BoolClause string
+
+// TermExecutionMode refers to how a terms (not term) filter should behave
+// The acceptable options are all prefixed with TEM
+// See https://www.elastic.co/guide/en/elasticsearch/reference/1.5/query-dsl-terms-filter.html
+type TermExecutionMode string
+
+const (
+	// TEMDefault default ES term filter behavior (plain)
+	TEMDefault TermExecutionMode = ""
+	// TEMPlain default ES term filter behavior
+	TEMPlain TermExecutionMode = "plain"
+	// TEMField field_data execution mode
+	TEMField TermExecutionMode = "field_data"
+	// TEMBool bool execution mode
+	TEMBool TermExecutionMode = "bool"
+	// TEMAnd and execution mode
+	TEMAnd TermExecutionMode = "and"
+	// TEMOr or execution mode
+	TEMOr TermExecutionMode = "or"
+)
+
+// FilterClause is either a boolClause or FilterOp for use with FilterWrap
+type FilterClause interface {
+	String() string
+}
+
+// FilterWrap is the legacy struct for chaining multiple filters with a bool
+// Legacy, use new FilterOp functions instead
+type FilterWrap struct {
+	boolClause string
+	filters    []interface{}
+}
+
+// NewFilterWrap creates a new FilterWrap struct
+func NewFilterWrap() *FilterWrap {
+	return &FilterWrap{filters: make([]interface{}, 0), boolClause: "and"}
+}
+
+func (f *FilterWrap) String() string {
+	return fmt.Sprintf(`fopv: %d:%v`, len(f.filters), f.filters)
+}
+
+// Bool sets the type of boolean filter to use.
+// Accepted values are "and" and "or".
+// Legacy, use new FilterOp functions instead
+func (f *FilterWrap) Bool(s string) {
+	f.boolClause = s
+}
+
+// addFilters appends the given filters to the wrap, consuming a leading
+// bool clause ("and"/"or") when one is passed as the first element
+func (f *FilterWrap) addFilters(fl []interface{}) {
+	if len(fl) > 1 {
+		fc := fl[0]
+		switch fc.(type) {
+		case BoolClause, string:
+			f.boolClause = fc.(string)
+			fl = fl[1:]
+		}
+	}
+	f.filters = append(f.filters, fl...)
+} + +// MarshalJSON override for FilterWrap to match the expected ES syntax with the bool at the root +func (f *FilterWrap) MarshalJSON() ([]byte, error) { + var root interface{} + if len(f.filters) > 1 { + root = map[string]interface{}{f.boolClause: f.filters} + } else if len(f.filters) == 1 { + root = f.filters[0] + } + return json.Marshal(root) +} + +/* + "filter": { + "range": { + "@timestamp": { + "from": "2012-12-29T16:52:48+00:00", + "to": "2012-12-29T17:52:48+00:00" + } + } + } + "filter": { + "missing": { + "field": "repository.name" + } + } + + "filter" : { + "terms" : { + "user" : ["kimchy", "elasticsearch"], + "execution" : "bool", + "_cache": true + } + } + + "filter" : { + "term" : { "user" : "kimchy"} + } + + "filter" : { + "and" : [ + { + "range" : { + "postDate" : { + "from" : "2010-03-01", + "to" : "2010-04-01" + } + } + }, + { + "prefix" : { "name.second" : "ba" } + } + ] + } + +*/ + + +// Filter creates a blank FilterOp that can be customized with further function calls +// This is the starting point for constructing any filter query +// Examples: +// +// Filter().Term("user","kimchy") +// +// // we use variadics to allow n arguments, first is the "field" rest are values +// Filter().Terms("user", "kimchy", "elasticsearch") +// +// Filter().Exists("repository.name") +func Filter() *FilterOp { + return &FilterOp{} +} + +// CompoundFilter creates a complete FilterWrap given multiple filters +// Legacy, use new FilterOp functions instead +func CompoundFilter(fl ...interface{}) *FilterWrap { + FilterVal := NewFilterWrap() + FilterVal.addFilters(fl) + return FilterVal +} + +// FilterOp holds all the information for a filter query +// Properties should not be set directly, but instead via the fluent-style API. +type FilterOp struct { + TermsMap map[string]interface{} `json:"terms,omitempty"` + TermMap map[string]interface{} `json:"term,omitempty"` + RangeMap map[string]RangeFilter `json:"range,omitempty"` + ExistsProp *propertyPathMarker `json:"exists,omitempty"` + MissingProp *propertyPathMarker `json:"missing,omitempty"` + AndFilters []*FilterOp `json:"and,omitempty"` + OrFilters []*FilterOp `json:"or,omitempty"` + NotFilters []*FilterOp `json:"not,omitempty"` + LimitProp *LimitFilter `json:"limit,omitempty"` + TypeProp *TypeFilter `json:"type,omitempty"` + IdsProp *IdsFilter `json:"ids,omitempty"` + ScriptProp *ScriptFilter `json:"script,omitempty"` + GeoDistMap map[string]interface{} `json:"geo_distance,omitempty"` + GeoDistRangeMap map[string]interface{} `json:"geo_distance_range,omitempty"` +} + +type propertyPathMarker struct { + Field string `json:"field"` +} + +// LimitFilter holds the Limit filter information +// Value: number of documents to limit +type LimitFilter struct { + Value int `json:"value"` +} + +// TypeFilter filters on the document type +// Value: the document type to filter +type TypeFilter struct { + Value string `json:"value"` +} + +// IdsFilter holds the type and ids (on the _id field) to filter +// Type: a string or an array of string types. Optional. 
+// Values: Array of ids to match +type IdsFilter struct { + Type []string `json:"type,omitempty"` + Values []interface{} `json:"values,omitempty"` +} + +// ScriptFilter will filter using a custom javascript function +// Script: the javascript to run +// Params: map of custom parameters to pass into the function (JSON), if any +// IsCached: whether to cache the results of the filter +type ScriptFilter struct { + Script string `json:"script"` + Params map[string]interface{} `json:"params,omitempty"` + IsCached bool `json:"_cache,omitempty"` +} + +// RangeFilter filters given a range. Parameters need to be comparable for ES to accept. +// Only a minimum of one comparison parameter is required. You probably shouldn't mix GT and GTE parameters. +// Gte: the greater-than-or-equal to value. Should be a number or date. +// Lte: the less-than-or-equal to value. Should be a number or date. +// Gt: the greater-than value. Should be a number or date. +// Lt: the less-than value. Should be a number or date. +// TimeZone: the timezone to use (+|-h:mm format), if the other parameters are dates +type RangeFilter struct { + Gte interface{} `json:"gte,omitempty"` + Lte interface{} `json:"lte,omitempty"` + Gt interface{} `json:"gt,omitempty"` + Lt interface{} `json:"lt,omitempty"` + TimeZone string `json:"time_zone,omitempty"` //Ideally this would be an int +} + +// GeoLocation holds the coordinates for a geo query. Currently hashes are not supported. +type GeoLocation struct { + Latitude float32 `json:"lat"` + Longitude float32 `json:"lon"` +} + +// GeoField holds a GeoLocation and a field to match to. +// This exists so the struct will match the ES schema. +type GeoField struct { + GeoLocation + Field string +} + +// Term will add a term to the filter. +// Multiple Term filters can be added, and ES will OR them. +// If the term already exists in the FilterOp, the value will be overridden. +func (f *FilterOp) Term(field string, value interface{}) *FilterOp { + if len(f.TermMap) == 0 { + f.TermMap = make(map[string]interface{}) + } + + f.TermMap[field] = value + return f +} + +// And will add an AND op to the filter. One or more FilterOps can be passed in. +func (f *FilterOp) And(filters ...*FilterOp) *FilterOp { + if len(f.AndFilters) == 0 { + f.AndFilters = filters[:] + } else { + f.AndFilters = append(f.AndFilters, filters...) + } + + return f +} + +// Or will add an OR op to the filter. One or more FilterOps can be passed in. +func (f *FilterOp) Or(filters ...*FilterOp) *FilterOp { + if len(f.OrFilters) == 0 { + f.OrFilters = filters[:] + } else { + f.OrFilters = append(f.OrFilters, filters...) + } + + return f +} + +// Not will add a NOT op to the filter. One or more FilterOps can be passed in. +func (f *FilterOp) Not(filters ...*FilterOp) *FilterOp { + if len(f.NotFilters) == 0 { + f.NotFilters = filters[:] + + } else { + f.NotFilters = append(f.NotFilters, filters...) + } + + return f +} + +// GeoDistance will add a GEO DISTANCE op to the filter. +// distance: distance in ES distance format, i.e. "100km" or "100mi". +// fields: an array of GeoField origin coordinates. Only one coordinate needs to match. +func (f *FilterOp) GeoDistance(distance string, fields ...GeoField) *FilterOp { + f.GeoDistMap = make(map[string]interface{}) + f.GeoDistMap["distance"] = distance + for _, val := range fields { + f.GeoDistMap[val.Field] = val.GeoLocation + } + + return f +} + +// GeoDistanceRange will add a GEO DISTANCE RANGE op to the filter. +// from: minimum distance in ES distance format, i.e. 
"100km" or "100mi". +// to: maximum distance in ES distance format, i.e. "100km" or "100mi". +// fields: an array of GeoField origin coordinates. Only one coor +func (f *FilterOp) GeoDistanceRange(from string, to string, fields ...GeoField) *FilterOp { + f.GeoDistRangeMap = make(map[string]interface{}) + f.GeoDistRangeMap["from"] = from + f.GeoDistRangeMap["to"] = to + + for _, val := range fields { + f.GeoDistRangeMap[val.Field] = val.GeoLocation + } + + return f +} + +// NewGeoField is a helper function to create values for the GeoDistance filters +func NewGeoField(field string, latitude float32, longitude float32) GeoField { + return GeoField{ + GeoLocation: GeoLocation{Latitude: latitude, Longitude: longitude}, + Field: field} +} + +// Terms adds a TERMS op to the filter. +// field: the document field +// executionMode Term execution mode, starts with TEM +// values: array of values to match +// Note: you can only have one terms clause in a filter. Use a bool filter to combine multiple. +func (f *FilterOp) Terms(field string, executionMode TermExecutionMode, values ...interface{}) *FilterOp { + //You can only have one terms in a filter + f.TermsMap = make(map[string]interface{}) + + if executionMode != "" { + f.TermsMap["execution"] = executionMode + } + + f.TermsMap[field] = values + + return f +} + +// Range adds a range filter for the given field. +// See the RangeFilter struct documentation for information about the parameters. +func (f *FilterOp) Range(field string, gte interface{}, + gt interface{}, lte interface{}, lt interface{}, timeZone string) *FilterOp { + + if f.RangeMap == nil { + f.RangeMap = make(map[string]RangeFilter) + } + + f.RangeMap[field] = RangeFilter{ + Gte: gte, + Gt: gt, + Lte: lte, + Lt: lt, + TimeZone: timeZone} + + return f +} + +// Type adds a TYPE op to the filter. +func (f *FilterOp) Type(fieldType string) *FilterOp { + f.TypeProp = &TypeFilter{Value: fieldType} + return f +} + +// Ids adds a IDS op to the filter. +func (f *FilterOp) Ids(ids ...interface{}) *FilterOp { + f.IdsProp = &IdsFilter{Values: ids} + return f +} + +// IdsByTypes adds a IDS op to the filter, but also allows passing in an array of types for the query. +func (f *FilterOp) IdsByTypes(types []string, ids ...interface{}) *FilterOp { + f.IdsProp = &IdsFilter{Type: types, Values: ids} + return f +} + +// Exists adds an EXISTS op to the filter. +func (f *FilterOp) Exists(field string) *FilterOp { + f.ExistsProp = &propertyPathMarker{Field: field} + return f +} + +// Missing adds an MISSING op to the filter. +func (f *FilterOp) Missing(field string) *FilterOp { + f.MissingProp = &propertyPathMarker{Field: field} + return f +} + +// Limit adds an LIMIT op to the filter. +func (f *FilterOp) Limit(maxResults int) *FilterOp { + f.LimitProp = &LimitFilter{Value: maxResults} + return f +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter_test.go new file mode 100644 index 000000000..a4931ddc6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter_test.go @@ -0,0 +1,287 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestFilterDsl(t *testing.T) { + Convey("And filter", t, func() { + filter := Filter().And(Filter().Term("test", "asdf")). + And(Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) + actual, err := GetJson(filter) + + actualFilters := actual["and"].([]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(2, ShouldEqual, len(actualFilters)) + So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) + So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) + }) + + Convey("Or filter", t, func() { + filter := Filter().Or(Filter().Term("test", "asdf"), Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) + actual, err := GetJson(filter) + + actualFilters := actual["or"].([]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(2, ShouldEqual, len(actualFilters)) + So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) + So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) + }) + + Convey("Not filter", t, func() { + filter := Filter().Not(Filter().Term("test", "asdf")). + Not(Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) + actual, err := GetJson(filter) + + actualFilters := actual["not"].([]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(2, ShouldEqual, len(actualFilters)) + So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) + So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) + }) + + Convey("Terms filter", t, func() { + filter := Filter().Terms("Sample", TEMAnd, "asdf", 123, true) + actual, err := GetJson(filter) + + actualTerms := actual["terms"].(map[string]interface{}) + actualValues := actualTerms["Sample"].([]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(3, ShouldEqual, len(actualValues)) + So(actualValues[0], ShouldEqual, "asdf") + So(actualValues[1], ShouldEqual, float64(123)) + So(actualValues[2], ShouldEqual, true) + So("and", ShouldEqual, actualTerms["execution"]) + }) + + Convey("Term filter", t, func() { + filter := Filter().Term("Sample", "asdf").Term("field2", 341.4) + actual, err := GetJson(filter) + + actualTerm := actual["term"].(map[string]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So("asdf", ShouldEqual, actualTerm["Sample"]) + So(float64(341.4), ShouldEqual, actualTerm["field2"]) + }) + + Convey("Range filter", t, func() { + filter := Filter().Range("rangefield", 1, 2, 3, 4, "+08:00") + actual, err := GetJson(filter) + //A bit lazy, probably should assert keys exist + actualRange := actual["range"].(map[string]interface{})["rangefield"].(map[string]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(float64(1), ShouldEqual, actualRange["gte"]) + So(float64(2), ShouldEqual, actualRange["gt"]) + So(float64(3), ShouldEqual, actualRange["lte"]) + So(float64(4), ShouldEqual, actualRange["lt"]) + So("+08:00", ShouldEqual, 
actualRange["time_zone"]) + }) + + Convey("Exists filter", t, func() { + filter := Filter().Exists("field1") + actual, err := GetJson(filter) + + actualValue := actual["exists"].(map[string]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So("field1", ShouldEqual, actualValue["field"]) + }) + + Convey("Missing filter", t, func() { + filter := Filter().Missing("field1") + actual, err := GetJson(filter) + + actualValue := actual["missing"].(map[string]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So("field1", ShouldEqual, actualValue["field"]) + }) + + Convey("Limit filter", t, func() { + filter := Filter().Limit(100) + actual, err := GetJson(filter) + + actualValue := actual["limit"].(map[string]interface{}) + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(float64(100), ShouldEqual, actualValue["value"]) + }) + + Convey("Type filter", t, func() { + filter := Filter().Type("my_type") + actual, err := GetJson(filter) + + actualValue := actual["type"].(map[string]interface{}) + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So("my_type", ShouldEqual, actualValue["value"]) + }) + + Convey("Ids filter", t, func() { + filter := Filter().Ids("test", "asdf", "fdsa") + actual, err := GetJson(filter) + + actualValue := actual["ids"].(map[string]interface{}) + actualValues := actualValue["values"].([]interface{}) + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(nil, ShouldEqual, actualValue["type"]) + So(3, ShouldEqual, len(actualValues)) + So("test", ShouldEqual, actualValues[0]) + So("asdf", ShouldEqual, actualValues[1]) + So("fdsa", ShouldEqual, actualValues[2]) + }) + + Convey("IdsByTypes filter", t, func() { + filter := Filter().IdsByTypes([]string{"my_type"}, "test", "asdf", "fdsa") + actual, err := GetJson(filter) + + actualValue := actual["ids"].(map[string]interface{}) + actualTypes := actualValue["type"].([]interface{}) + actualValues := actualValue["values"].([]interface{}) + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(1, ShouldEqual, len(actualTypes)) + So("my_type", ShouldEqual, actualTypes[0]) + So(3, ShouldEqual, len(actualValues)) + So("test", ShouldEqual, actualValues[0]) + So("asdf", ShouldEqual, actualValues[1]) + So("fdsa", ShouldEqual, actualValues[2]) + }) + + Convey("GeoDistance filter", t, func() { + filter := Filter().GeoDistance("100km", NewGeoField("pin.location", 32.3, 23.4)) + actual, err := GetJson(filter) + + actualValue := actual["geo_distance"].(map[string]interface{}) + actualLocation := actualValue["pin.location"].(map[string]interface{}) + So(err, ShouldBeNil) + So("100km", ShouldEqual, actualValue["distance"]) + So(float64(32.3), ShouldEqual, actualLocation["lat"]) + So(float64(23.4), ShouldEqual, actualLocation["lon"]) + }) + + Convey("GeoDistanceRange filter", t, func() { + filter := Filter().GeoDistanceRange("100km", "200km", NewGeoField("pin.location", 32.3, 23.4)) + actual, err := GetJson(filter) + + actualValue := actual["geo_distance_range"].(map[string]interface{}) + actualLocation := actualValue["pin.location"].(map[string]interface{}) + So(err, ShouldBeNil) + So("100km", ShouldEqual, actualValue["from"]) + So("200km", ShouldEqual, actualValue["to"]) + So(float64(32.3), ShouldEqual, actualLocation["lat"]) + So(float64(23.4), ShouldEqual, actualLocation["lon"]) + }) +} + +func TestFilters(t *testing.T) { + + c := NewTestConn() + PopulateTestDB(t, c) + defer TearDownTestDB(c) + + Convey("Exists filter", t, func() { + qry := Search("oilers").Filter( + 
Filter().Exists("goals"),
+		)
+		out, err := qry.Result(c)
+		So(err, ShouldBeNil)
+		So(out, ShouldNotBeNil)
+		So(out.Hits, ShouldNotBeNil)
+		So(out.Hits.Len(), ShouldEqual, 10)
+		So(out.Hits.Total, ShouldEqual, 12)
+	})
+
+	Convey("Missing filter", t, func() {
+		qry := Search("oilers").Filter(
+			Filter().Missing("goals"),
+		)
+		out, err := qry.Result(c)
+		So(err, ShouldBeNil)
+		So(out, ShouldNotBeNil)
+		So(out.Hits, ShouldNotBeNil)
+		So(out.Hits.Total, ShouldEqual, 2)
+	})
+
+	Convey("Terms filter", t, func() {
+		qry := Search("oilers").Filter(
+			Filter().Terms("pos", TEMDefault, "RW", "LW"),
+		)
+		out, err := qry.Result(c)
+		So(err, ShouldBeNil)
+		So(out, ShouldNotBeNil)
+		So(out.Hits, ShouldNotBeNil)
+		So(out.Hits.Total, ShouldEqual, 6)
+	})
+
+	Convey("Filter involving an AND", t, func() {
+		qry := Search("oilers").Filter(
+			Filter().And(
+				Filter().Terms("pos", TEMDefault, "LW"),
+				Filter().Exists("PIM"),
+			),
+		)
+
+		out, err := qry.Result(c)
+		So(err, ShouldBeNil)
+		So(out, ShouldNotBeNil)
+		So(out.Hits, ShouldNotBeNil)
+		So(out.Hits.Total, ShouldEqual, 2)
+	})
+
+	Convey("Filtering filter results", t, func() {
+		qry := Search("oilers").Filter(
+			Filter().Terms("pos", TEMDefault, "LW"),
+		)
+		qry.Filter(
+			Filter().Exists("PIM"),
+		)
+		out, err := qry.Result(c)
+		So(err, ShouldBeNil)
+		So(out, ShouldNotBeNil)
+		So(out.Hits, ShouldNotBeNil)
+		So(out.Hits.Total, ShouldEqual, 2)
+	})
+
+	Convey("Filter involving OR", t, func() {
+		qry := Search("oilers").Filter(
+			Filter().Or(
+				Filter().Terms("pos", TEMDefault, "G"),
+				Filter().Range("goals", nil, 80, nil, nil, ""),
+			),
+		)
+		out, err := qry.Result(c)
+		So(err, ShouldBeNil)
+		So(out, ShouldNotBeNil)
+		So(out.Hits, ShouldNotBeNil)
+		So(out.Hits.Total, ShouldEqual, 3)
+	})
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight.go
new file mode 100644
index 000000000..ac74947d7
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight.go
@@ -0,0 +1,138 @@
+package elastigo
+
+import "encoding/json"
+
+func NewHighlight() *HighlightDsl {
+	return &HighlightDsl{}
+}
+
+type HighlightDsl struct {
+	Settings  *HighlightEmbed           `json:"-"`
+	TagSchema string                    `json:"tag_schema,omitempty"`
+	Fields    map[string]HighlightEmbed `json:"fields,omitempty"`
+}
+
+func NewHighlightOpts() *HighlightEmbed {
+	return &HighlightEmbed{}
+}
+
+type HighlightEmbed struct {
+	BoundaryCharsVal   string    `json:"boundary_chars,omitempty"`
+	BoundaryMaxScanVal int       `json:"boundary_max_scan,omitempty"`
+	PreTags            []string  `json:"pre_tags,omitempty"`
+	PostTags           []string  `json:"post_tags,omitempty"`
+	FragmentSizeVal    int       `json:"fragment_size,omitempty"`
+	NumOfFragmentsVal  int       `json:"number_of_fragments,omitempty"`
+	HighlightQuery     *QueryDsl `json:"highlight_query,omitempty"`
+	MatchedFieldsVal   []string  `json:"matched_fields,omitempty"`
+	OrderVal           string    `json:"order,omitempty"`
+	TypeVal            string    `json:"type,omitempty"`
+}
+
+// Custom marshalling
+func (t *HighlightDsl) MarshalJSON() ([]byte, error) {
+	m := make(map[string]interface{})
+
+	if t.Fields != nil {
+		m["fields"] = t.Fields
+	}
+
+	if t.TagSchema != "" {
+		m["tag_schema"] = t.TagSchema
+	}
+
+	if t.Settings == nil {
+		return json.Marshal(m)
+	}
+
+	//This is terrible :(, could use structs package to avoid extra serialization.
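+	// What follows merges the embedded Settings into m: Settings is marshalled
+	// to JSON and then unmarshalled back into the same map, flattening its
+	// options alongside "fields" and "tag_schema" at the top level.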
+	embed, err := json.Marshal(t.Settings)
+	if err == nil {
+		err = json.Unmarshal(embed, &m)
+	}
+
+	if err == nil {
+		return json.Marshal(m)
+	}
+
+	return nil, err
+}
+
+func (h *HighlightDsl) AddField(name string, settings *HighlightEmbed) *HighlightDsl {
+	if h.Fields == nil {
+		h.Fields = make(map[string]HighlightEmbed)
+	}
+
+	if settings != nil {
+		h.Fields[name] = *settings
+	} else {
+		h.Fields[name] = HighlightEmbed{}
+	}
+
+	return h
+}
+
+func (h *HighlightDsl) Schema(schema string) *HighlightDsl {
+	h.TagSchema = schema
+	return h
+}
+
+func (h *HighlightDsl) SetOptions(options *HighlightEmbed) *HighlightDsl {
+	h.Settings = options
+	return h
+}
+
+func (o *HighlightEmbed) BoundaryChars(chars string) *HighlightEmbed {
+	o.BoundaryCharsVal = chars
+	return o
+}
+
+func (o *HighlightEmbed) BoundaryMaxScan(max int) *HighlightEmbed {
+	o.BoundaryMaxScanVal = max
+	return o
+}
+
+func (he *HighlightEmbed) FragSize(size int) *HighlightEmbed {
+	he.FragmentSizeVal = size
+	return he
+}
+
+func (he *HighlightEmbed) NumFrags(numFrags int) *HighlightEmbed {
+	he.NumOfFragmentsVal = numFrags
+	return he
+}
+
+func (he *HighlightEmbed) MatchedFields(fields ...string) *HighlightEmbed {
+	he.MatchedFieldsVal = fields
+	return he
+}
+
+func (he *HighlightEmbed) Order(order string) *HighlightEmbed {
+	he.OrderVal = order
+	return he
+}
+
+func (he *HighlightEmbed) Tags(pre string, post string) *HighlightEmbed {
+	if he == nil {
+		he = &HighlightEmbed{}
+	}
+
+	if he.PreTags == nil {
+		he.PreTags = []string{pre}
+	} else {
+		he.PreTags = append(he.PreTags, pre)
+	}
+
+	if he.PostTags == nil {
+		he.PostTags = []string{post}
+	} else {
+		he.PostTags = append(he.PostTags, post)
+	}
+
+	return he
+}
+
+func (he *HighlightEmbed) Type(highlightType string) *HighlightEmbed {
+	he.TypeVal = highlightType
+	return he
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight_test.go
new file mode 100644
index 000000000..ca5b9304d
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight_test.go
@@ -0,0 +1,67 @@
+package elastigo
+
+import (
+	"github.com/bmizerany/assert"
+	"testing"
+)
+
+func TestEmbedDsl(t *testing.T) {
+	highlight := NewHighlight().SetOptions(NewHighlightOpts().
+		Tags("<b>", "</b>").
+		BoundaryChars("asdf").BoundaryMaxScan(100).
+		FragSize(10).NumFrags(50).
+		Order("order").Type("fdsa").
+		MatchedFields("1", "2"))
+
+	actual, err := GetJson(highlight)
+
+	assert.Equal(t, nil, err)
+	assert.Equal(t, "<b>", actual["pre_tags"].([]interface{})[0])
+	assert.Equal(t, "</b>", actual["post_tags"].([]interface{})[0])
+	assert.Equal(t, "asdf", actual["boundary_chars"])
+	assert.Equal(t, float64(100), actual["boundary_max_scan"])
+	assert.Equal(t, float64(10), actual["fragment_size"])
+	assert.Equal(t, float64(50), actual["number_of_fragments"])
+	assert.Equal(t, "1", actual["matched_fields"].([]interface{})[0])
+	assert.Equal(t, "2", actual["matched_fields"].([]interface{})[1])
+	assert.Equal(t, "order", actual["order"])
+	assert.Equal(t, "fdsa", actual["type"])
+}
+
+func TestFieldDsl(t *testing.T) {
+	highlight := NewHighlight().AddField("whatever", NewHighlightOpts().
+		Tags("<b>", "</b>").
+		BoundaryChars("asdf").BoundaryMaxScan(100).
+		FragSize(10).NumFrags(50).
+		Order("order").Type("fdsa").
+		MatchedFields("1", "2"))
+
+	result, err := GetJson(highlight)
+	actual := result["fields"].(map[string]interface{})["whatever"].(map[string]interface{})
+
+	assert.Equal(t, nil, err)
+	assert.Equal(t, "<b>", actual["pre_tags"].([]interface{})[0])
+	assert.Equal(t, "</b>", actual["post_tags"].([]interface{})[0])
+	assert.Equal(t, "asdf", actual["boundary_chars"])
+	assert.Equal(t, float64(100), actual["boundary_max_scan"])
+	assert.Equal(t, float64(10), actual["fragment_size"])
+	assert.Equal(t, float64(50), actual["number_of_fragments"])
+	assert.Equal(t, "1", actual["matched_fields"].([]interface{})[0])
+	assert.Equal(t, "2", actual["matched_fields"].([]interface{})[1])
+	assert.Equal(t, "order", actual["order"])
+	assert.Equal(t, "fdsa", actual["type"])
+}
+
+func TestEmbedAndFieldDsl(t *testing.T) {
+	highlight := NewHighlight().
+		SetOptions(NewHighlightOpts().Tags("<b>", "</b>")).
+		AddField("afield", NewHighlightOpts().Type("something"))
+
+	actual, err := GetJson(highlight)
+	actualField := actual["fields"].(map[string]interface{})["afield"].(map[string]interface{})
+
+	assert.Equal(t, nil, err)
+	assert.Equal(t, "<b>", actual["pre_tags"].([]interface{})[0])
+	assert.Equal(t, "</b>", actual["post_tags"].([]interface{})[0])
+	assert.Equal(t, "something", actualField["type"])
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchquery.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchquery.go
new file mode 100644
index 000000000..dd01ed717
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchquery.go
@@ -0,0 +1,262 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+	//"log"
+	"strings"
+)
+
+// Query creates a new Query Dsl
+func Query() *QueryDsl {
+	return &QueryDsl{}
+}
+
+/*
+
+some ways to serialize
+"query": {
+	"filtered": {
+		"query": {
+			"query_string": {
+				"default_operator": "OR",
+				"default_field": "_all",
+				"query": " actor:\"bob\" AND type:\"EventType\""
+			}
+		},
+		"filter": {
+			"range": {
+				"@timestamp": {
+					"from": "2012-12-29T16:52:48+00:00",
+					"to": "2012-12-29T17:52:48+00:00"
+				}
+			}
+		}
+	}
+},
+
+"query" : {
+	"term" : { "user" : "kimchy" }
+}
+
+"query" : {
+	"match_all" : {}
+},
+*/
+type QueryDsl struct {
+	QueryEmbed
+	FilterVal *FilterOp `json:"filter,omitempty"`
+}
+
+// QueryEmbed is the core query syntax; it can be embedded as a child of a
+// variety of different parents
+type QueryEmbed struct {
+	MatchAll      *MatchAll              `json:"match_all,omitempty"`
+	Terms         map[string]string      `json:"term,omitempty"`
+	Qs            *QueryString           `json:"query_string,omitempty"`
+	MultiMatch    *MultiMatch            `json:"multi_match,omitempty"`
+	FunctionScore map[string]interface{} `json:"function_score,omitempty"`
+	//Exist string `json:"_exists_,omitempty"`
+}
+
+// MarshalJSON provides custom marshalling to support the query dsl which is a conditional
+// json format, not always the same parent/children
+func (qd *QueryDsl) MarshalJSON() ([]byte, error) {
+	q := qd.QueryEmbed
+	hasQuery := false
+	if q.Qs != nil || len(q.Terms) > 0 || q.MatchAll != nil || q.MultiMatch != nil {
+		hasQuery = true
+	}
+	// If the dsl has both a filter and a query, wrap them in a filtered query
+	if qd.FilterVal != nil && hasQuery {
+		queryB, err := json.Marshal(q)
+		if err != nil {
+			return queryB, err
+		}
+		filterB, err := json.Marshal(qd.FilterVal)
+		if err != nil {
+			return filterB, err
+		}
+		return []byte(fmt.Sprintf(`{"filtered":{"query":%s,"filter":%s}}`, queryB, filterB)), nil
+	}
+	return json.Marshal(q)
+}
+
+// All matches all documents
+func (q *QueryDsl) All() *QueryDsl {
+	q.MatchAll = &MatchAll{""}
+	return q
+}
+
+// Range adds a RANGE FilterOp to the search query
+// Legacy. Use the Filter() function instead
+func (q *QueryDsl) Range(fop *FilterOp) *QueryDsl {
+	if q.FilterVal == nil {
+		q.FilterVal = fop
+		return q
+	}
+
+	return q
+}
+
+// Term adds a term search for a specific field
+// Term("user","kimchy")
+func (q *QueryDsl) Term(name, value string) *QueryDsl {
+	if len(q.Terms) == 0 {
+		q.Terms = make(map[string]string)
+	}
+	q.Terms[name] = value
+	return q
+}
+
+// FunctionScore sets functions to use to score the documents.
+// http://www.elastic.co/guide/en/elasticsearch/reference/1.x/query-dsl-function-score-query.html +func (q *QueryDsl) FunctionScore(mode string, functions ...map[string]interface{}) *QueryDsl { + q.QueryEmbed.FunctionScore = map[string]interface{}{ + "functions": functions, + "score_mode": mode, + } + return q +} + +// The raw search strings (lucene valid) +func (q *QueryDsl) Search(searchFor string) *QueryDsl { + //I don't think this is right, it is not a filter.query, it should be q query? + qs := NewQueryString("", "") + q.QueryEmbed.Qs = &qs + q.QueryEmbed.Qs.Query = searchFor + return q +} + +// Querystring operations +func (q *QueryDsl) Qs(qs *QueryString) *QueryDsl { + q.QueryEmbed.Qs = qs + return q +} + +// SetLenient sets whether the query should ignore format based failures, +// such as passing in text to a number field. +func (q *QueryDsl) SetLenient(lenient bool) *QueryDsl { + q.QueryEmbed.Qs.Lenient = lenient + return q +} + +// Fields in query_string search +// Fields("fieldname","search_for","","") +// +// Fields("fieldname,field2,field3","search_for","","") +// +// Fields("fieldname,field2,field3","search_for","field_exists","") +func (q *QueryDsl) Fields(fields, search, exists, missing string) *QueryDsl { + fieldList := strings.Split(fields, ",") + qs := NewQueryString("", "") + q.QueryEmbed.Qs = &qs + q.QueryEmbed.Qs.Query = search + if len(fieldList) == 1 { + q.QueryEmbed.Qs.DefaultField = fields + } else { + q.QueryEmbed.Qs.Fields = fieldList + } + q.QueryEmbed.Qs.Exists = exists + q.QueryEmbed.Qs.Missing = missing + return q +} + +// Filter this query +func (q *QueryDsl) Filter(f *FilterOp) *QueryDsl { + q.FilterVal = f + return q +} + +// MultiMatch allows searching against multiple fields. +func (q *QueryDsl) MultiMatch(s string, fields []string) *QueryDsl { + q.QueryEmbed.MultiMatch = &MultiMatch{Query: s, Fields: fields} + return q +} + +type MultiMatch struct { + Query string `json:"query"` + Fields []string `json:"fields"` +} + +type MatchAll struct { + All string `json:"-"` +} + +// should we reuse QueryDsl here? +type QueryWrap struct { + Qs QueryString `json:"query_string,omitempty"` +} + +// QueryString based search +func NewQueryString(field, query string) QueryString { + return QueryString{"", field, query, "", "", nil, false} +} + +type QueryString struct { + DefaultOperator string `json:"default_operator,omitempty"` + DefaultField string `json:"default_field,omitempty"` + Query string `json:"query,omitempty"` + Exists string `json:"_exists_,omitempty"` + Missing string `json:"_missing_,omitempty"` + Fields []string `json:"fields,omitempty"` + Lenient bool `json:"lenient,omitempty"` + //_exists_:field1, + //_missing_:field1, +} + +//I don't know how any of the Term stuff below is supposed to work. -mikeyoon + +// Generic Term based (used in query, facet, filter) +type Term struct { + Terms Terms `json:"terms,omitempty"` + FilterVal *FilterWrap `json:"facet_filter,omitempty"` +} + +type Terms struct { + Fields []string `json:"field,omitempty"` + Size string `json:"size,omitempty"` + Regex string `json:"regex,omitempty"` +} + +func NewTerm(fields ...string) *Term { + m := &Term{Terms{Fields: fields}, nil} + return m +} + +func (s *Term) Filter(fl ...interface{}) *Term { + if s.FilterVal == nil { + s.FilterVal = NewFilterWrap() + } + + s.FilterVal.addFilters(fl) + return s +} + +// Custom marshalling +func (t *Terms) MarshalJSON() ([]byte, error) { + m := make(map[string]interface{}) + // TODO: this isn't getting called!? 
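+	// Intent of the branches below (field names illustrative): a single field
+	// serializes as {"field": "user"}, several as {"fields": ["user", "repo"]},
+	// matching the ES terms facet syntax.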
+ if len(t.Fields) == 1 { + m["field"] = t.Fields[0] + } else if len(t.Fields) > 1 { + m["fields"] = t.Fields + } + if len(t.Regex) > 0 { + m["regex"] = t.Regex + } + if len(t.Size) > 0 { + m["size"] = t.Size + } + return json.Marshal(m) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchreadme b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchreadme new file mode 100644 index 000000000..2d2d55582 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchreadme @@ -0,0 +1,4 @@ + + +To run tests on this, you must first have run/imported data inside of *core* + diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch.go new file mode 100644 index 000000000..c921ae5a3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch.go @@ -0,0 +1,204 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" + u "github.com/araddon/gou" + "strconv" + "strings" +) + +var ( + _ = u.DEBUG +) + +// Search is the entry point to the SearchDsl, it is a chainable set of utilities +// to create searches. 
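+// Each method mutates and returns the same *SearchDsl, so calls chain
+// naturally, as in the example below.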
+//
+// params
+//    @index = elasticsearch index to search
+//
+//    out, err := Search("github").Type("Issues").Pretty().Query(
+//        Query().Range(
+//            Filter().Range("created_at", "2012-12-10T15:00:00-08:00", nil, "2012-12-10T15:10:00-08:00", nil, ""),
+//        ).Search("add"),
+//    ).Result(conn)
func Search(index string) *SearchDsl {
	return &SearchDsl{Index: index, args: map[string]interface{}{}}
}

+type SearchDsl struct {
+	args          map[string]interface{}
+	types         []string
+	FromVal       int                      `json:"from,omitempty"`
+	SizeVal       int                      `json:"size,omitempty"`
+	Index         string                   `json:"-"`
+	FacetVal      *FacetDsl                `json:"facets,omitempty"`
+	QueryVal      *QueryDsl                `json:"query,omitempty"`
+	SortBody      []*SortDsl               `json:"sort,omitempty"`
+	FilterVal     *FilterOp                `json:"filter,omitempty"`
+	AggregatesVal map[string]*AggregateDsl `json:"aggregations,omitempty"`
+	HighlightVal  *HighlightDsl            `json:"highlight,omitempty"`
+}
+
+func (s *SearchDsl) Bytes(conn *Conn) ([]byte, error) {
+	return conn.DoCommand("POST", s.url(), s.args, s)
+}
+
+func (s *SearchDsl) Result(conn *Conn) (*SearchResult, error) {
+	var retval SearchResult
+	body, err := s.Bytes(conn)
+	retval.RawJSON = body
+	if err != nil {
+		u.Errorf("%v", err)
+		return nil, err
+	}
+	jsonErr := json.Unmarshal(body, &retval)
+	if jsonErr != nil {
+		u.Errorf("%v \n\t%s", jsonErr, string(body))
+	}
+	return &retval, jsonErr
+}
+
+func (s *SearchDsl) url() string {
+	url := fmt.Sprintf("/%s%s/_search", s.Index, s.getType())
+	return url
+}
+
+func (s *SearchDsl) Pretty() *SearchDsl {
+	s.args["pretty"] = "1"
+	return s
+}
+
+// Type restricts the search to an elasticsearch *type* within the index;
+// it may be called multiple times to search several types.
+func (s *SearchDsl) Type(indexType string) *SearchDsl {
+	if len(s.types) == 0 {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, indexType)
+	return s
+}
+
+func (s *SearchDsl) getType() string {
+	if len(s.types) > 0 {
+		return "/" + strings.Join(s.types, ",")
+	}
+	return ""
+}
+
+func (s *SearchDsl) From(from string) *SearchDsl {
+	s.args["from"] = from
+	return s
+}
+
+// Search is a simple interface to search; it doesn't have the power of Query
+// but uses a simple query_string search
+func (s *SearchDsl) Search(srch string) *SearchDsl {
+	s.QueryVal = Query().Search(srch)
+	return s
+}
+
+func (s *SearchDsl) Size(size string) *SearchDsl {
+	s.args["size"] = size
+	return s
+}
+
+func (s *SearchDsl) Fields(fields ...string) *SearchDsl {
+	s.args["fields"] = strings.Join(fields, ",")
+	return s
+}
+
+func (s *SearchDsl) Source(returnSource bool) *SearchDsl {
+	s.args["_source"] = strconv.FormatBool(returnSource)
+	return s
+}
+
+// Facet sets a facet expression for this search
+//
+//     qry := Search("github").Size("0").Facet(
+//         Facet().Regex("repository.name", "no.*").Size("8"),
+//     )
+//
+//     qry := Search("github").Pretty().Facet(
+//         Facet().Fields("type").Size("25"),
+//     )
+func (s *SearchDsl) Facet(f *FacetDsl) *SearchDsl {
+	s.FacetVal = f
+	return s
+}
+
+func (s *SearchDsl) Aggregates(aggs ...*AggregateDsl) *SearchDsl {
+	if len(aggs) < 1 {
+		return s
+	}
+	if len(s.AggregatesVal) == 0 {
+		s.AggregatesVal = make(map[string]*AggregateDsl)
+	}
+
+	for _, agg := range aggs {
+		s.AggregatesVal[agg.Name] = agg
+	}
+	return s
+}
+
+func (s *SearchDsl) Query(q *QueryDsl) *SearchDsl {
+	s.QueryVal = q
+	return s
+}
+
+// Filter sets the filter clause for this search. The method takes a single
+// *FilterOp; to apply several conditions at once, combine them first with
+// Filter().And(...), Filter().Or(...), or Filter().Not(...).
+//
+//     qry := Search("github").Filter(
+//         Filter().Exists("repository.name"),
+//     )
+//
+//     qry := Search("github").Filter(
+//         Filter().And(
+//             Filter().Exists("repository.name"),
+//             Filter().Terms("actor_attributes.location", TEMDefault, "portland"),
+//         ),
+//     )
+func (s *SearchDsl) Filter(fl *FilterOp) *SearchDsl {
+	s.FilterVal = fl
+	return s
+}
+
+func (s *SearchDsl) Sort(sort ...*SortDsl) *SearchDsl {
+	if s.SortBody == nil {
+		s.SortBody = make([]*SortDsl, 0)
+	}
+	s.SortBody = append(s.SortBody, sort...)
+	return s
+}
+
+func (s *SearchDsl) Scroll(duration string) *SearchDsl {
+	s.args["scroll"] = duration
+	return s
+}
+
+func (s *SearchDsl) SearchType(searchType string) *SearchDsl {
+	s.args["search_type"] = searchType
+	return s
+}
+
+func (s *SearchDsl) Highlight(highlight *HighlightDsl) *SearchDsl {
+	s.HighlightVal = highlight
+	return s
+}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch_test.go
new file mode 100644
index 000000000..81f11b3a3
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch_test.go
@@ -0,0 +1,291 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"github.com/araddon/gou"
+	. 
"github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestSearch(t *testing.T) { + + c := NewTestConn() + PopulateTestDB(t, c) + defer TearDownTestDB(c) + + Convey("Wildcard request query", t, func() { + + qry := map[string]interface{}{ + "query": map[string]interface{}{ + "wildcard": map[string]string{"name": "*hu*"}, + }, + } + out, err := c.Search("oilers", "", nil, qry) + + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 3) + }) + + Convey("Simple search", t, func() { + + // searching without faceting + qry := Search("oilers").Pretty().Query( + Query().Search("dave"), + ) + + // how many different docs used the word "dave" + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 2) + + out, _ = Search("oilers").Search("dave").Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 2) + }) + + Convey("URL Request query string", t, func() { + + out, err := c.SearchUri("oilers", "", map[string]interface{}{"q": "pos:LW"}) + + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 3) + }) + + + // A faceted search for what "type" of events there are + // - since we are not specifying an elasticsearch type it searches all () + // + // { + // "terms" : { + // "_type" : "terms", + // "missing" : 0, + // "total" : 7561, + // "other" : 0, + // "terms" : [ { + // "term" : "pushevent", + // "count" : 4185 + // }, { + // "term" : "createevent", + // "count" : 786 + // }.....] + // } + // } + + Convey("Facet search simple", t, func() { + + qry := Search("oilers").Pretty().Facet( + Facet().Fields("teams").Size("4"), + ).Query( + Query().All(), + ).Size("1") + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h := gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 37) + So(h.Int("teams.missing"), ShouldEqual, 0) + So(len(h.List("teams.terms")), ShouldEqual, 4) + + // change the size + qry.FacetVal.Size("20") + out, err = qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h = gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 37) + So(len(h.List("teams.terms")), ShouldEqual, 11) + + }) + + Convey("Facet search with type", t, func() { + + out, err := Search("oilers").Type("heyday").Pretty().Facet( + Facet().Fields("teams").Size("4"), + ).Query( + Query().All(), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h := gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 37) + So(len(h.List("teams.terms")), ShouldEqual, 4) + }) + + + Convey("Facet search with wildcard", t, func() { + + qry := Search("oilers").Pretty().Facet( + Facet().Fields("teams").Size("20"), + ).Query( + Query().Search("*w*"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h := gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 20) + So(len(h.List("teams.terms")), ShouldEqual, 7) + }) + + Convey("Facet search with range", t, func() { + + qry := Search("oilers").Pretty().Facet( + Facet().Fields("teams").Size("20"), + ).Query( + Query().Range( + Filter().Range("dob", "19600101", nil, "19621231", nil, ""), + ).Search("*w*"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h := gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 12) + 
So(len(h.List("teams.terms")), ShouldEqual, 5) + }) + + Convey("Search query with terms", t, func() { + + qry := Search("oilers").Query( + Query().Term("teams", "NYR"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 4) + So(out.Hits.Total, ShouldEqual, 4) + }) + + Convey("Search query with fields", t, func() { + + qry := Search("oilers").Query( + Query().Fields("teams", "NYR", "", ""), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 4) + So(out.Hits.Total, ShouldEqual, 4) + }) + + Convey("Search query with fields exist and missing", t, func() { + + qry := Search("oilers").Filter( + Filter().Exists("PIM"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 2) + So(out.Hits.Total, ShouldEqual, 2) + + qry = Search("oilers").Filter( + Filter().Missing("PIM"), + ) + out, err = qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 10) + So(out.Hits.Total, ShouldEqual, 12) + }) + + Convey("Search with query and filter", t, func() { + + out, err := Search("oilers").Size("25").Query( + Query().Fields("name", "*d*", "", ""), + ).Filter( + Filter().Terms("teams", TEMDefault, "STL"), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 2) + So(out.Hits.Total, ShouldEqual, 2) + }) + + Convey("Search with range", t, func() { + + out, err := Search("oilers").Size("25").Query( + Query().Range( + Filter().Range("dob", "19600101", nil, "19621231", nil, ""), + ).Search("*w*"), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 4) + So(out.Hits.Total, ShouldEqual, 4) + }) + + Convey("Search with sorting desc", t, func() { + + out, err := Search("oilers").Pretty().Query( + Query().All(), + ).Sort( + Sort("dob").Desc(), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 10) + So(out.Hits.Total, ShouldEqual, 14) + + b, err := out.Hits.Hits[0].Source.MarshalJSON() + h1 := gou.NewJsonHelper(b) + So(h1.String("name"), ShouldEqual, "Grant Fuhr") + }) + + Convey("Search with sorting asc", t, func() { + + out, err := Search("oilers").Pretty().Query( + Query().All(), + ).Sort( + Sort("dob"), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 10) + So(out.Hits.Total, ShouldEqual, 14) + + b, err := out.Hits.Hits[0].Source.MarshalJSON() + h1 := gou.NewJsonHelper(b) + So(h1.String("name"), ShouldEqual, "Pat Hughes") + }) + + Convey("Search with sorting desc with query", t, func() { + + out, err := Search("oilers").Pretty().Query( + Query().Search("*w*"), + ).Sort( + Sort("dob").Desc(), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 8) + So(out.Hits.Total, ShouldEqual, 8) + + b, err := out.Hits.Hits[0].Source.MarshalJSON() + h1 := gou.NewJsonHelper(b) + So(h1.String("name"), ShouldEqual, "Wayne Gretzky") + }) +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsort.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsort.go new file mode 100644 index 000000000..a8359173f --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsort.go @@ -0,0 +1,52 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + "fmt" +) + +// SortDsl accepts any number of Sort commands +// +// Query().Sort( +// Sort("last_name").Desc(), +// Sort("age"), +// ) +func Sort(field string) *SortDsl { + return &SortDsl{Name: field} +} + +type SortBody []interface{} +type SortDsl struct { + Name string + IsDesc bool +} + +func (s *SortDsl) Desc() *SortDsl { + s.IsDesc = true + return s +} +func (s *SortDsl) Asc() *SortDsl { + s.IsDesc = false + return s +} + +func (s *SortDsl) MarshalJSON() ([]byte, error) { + if s.IsDesc { + return json.Marshal(map[string]string{s.Name: "desc"}) + } + if s.Name == "_score" { + return []byte(`"_score"`), nil + } + return []byte(fmt.Sprintf(`"%s"`, s.Name)), nil // "user" assuming default = asc? +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/setup_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/setup_test.go new file mode 100644 index 000000000..026f2dc54 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/setup_test.go @@ -0,0 +1,84 @@ +package elastigo + +import ( + "testing" + "time" +) + +/* +// elastigo Conn adapter to avoid a circular dependency +type conn interface { + CreateIndex(name string) (interface{}, error) + DeleteIndex(name string) (interface{}, error) + + Index(index string, _type string, id string, args map[string]interface{}, data interface{}) (interface{}, error) +} +*/ + +func newIndexWorker(c *Conn, t *testing.T) func(interface{}) { + + return func(d interface{}) { + _, err := c.Index("oilers", "heyday", "", nil, d) + if err != nil { + t.Fatalf("Index failed: %s", err) + } + } +} + +func PopulateTestDB(t *testing.T, c *Conn) { + + // it is not technically necessary to create an index here + _, err := c.CreateIndex("oilers") + if err != nil { + t.Fatal("Error in CreateIndex", err) + } + + // set the mapping for dob to be a date so it can be used for range searches + _, err = c.DoCommand("PUT", "/oilers/heyday/_mapping?ignore_conflicts", nil, + string(`{"heyday": {"properties": { + "dob": {"type": "date", "format": "basic_date"}, + "pos": {"type": "string", "index": "not_analyzed"}, + "teams": {"type": "string", "index": "not_analyzed"} + }}}`)) + if err != nil { + t.Fatal("Error setting dob mapping", err) + } + + idx := newIndexWorker(c, t) + + idx(`{"name": "Mark Messier", "jersey": 11, "pos": "LW", "goals": 37, "PIM": 165, + "dob": "19610118", "teams": ["EDM", "NYR", "VAN"]}`) + idx(`{"name": "Wayne Gretzky", "jersey": 99, "pos": "C", "goals": 87, + "dob": "19610126", "teams": ["EDM", "NYR", "STL"]}`) + idx(`{"name": "Paul Coffey", "jersey": 7, "pos": "D", "goals": 40, + "dob": "19610601", "teams": ["EDM", "DET"]}`) + idx(`{"name": "Jari Kurri", "jersey": 17, "pos": "RW", "goals": 52, + "dob": "19600518", "teams": ["EDM", "VAN"]}`) + idx(`{"name": "Glenn Anderson", "jersey": 9, "pos": "RW", "goals": 54, + "dob": "19601002", "teams": ["EDM", "NYR", "TOR", "STL"]}`) + idx(`{"name": "Ken Linseman", "jersey": 13, "pos": "C", "goals": 18, + "dob": "19580811", "teams": 
["EDM", "TOR"]}`) + idx(`{"name": "Pat Hughes", "jersey": 16, "pos": "RW", "goals": 27, + "dob": "19550325", "teams": ["EDM", "MTL", "PIT"]}`) + idx(`{"name": "Dave Hunter", "jersey": 12, "pos": "LW", "goals": 22, + "dob": "19580101", "teams": ["EDM", "PIT"]}`) + idx(`{"name": "Kevin Lowe", "jersey": 4, "pos": "D", "goals": 4, + "dob": "19590415", "teams": ["EDM", "NYR"]}`) + idx(`{"name": "Charlie Huddy", "jersey": 22, "pos": "D", "goals": 8, + "dob": "19590602", "teams": ["EDM", "BUF", "STL"]}`) + idx(`{"name": "Randy Gregg", "jersey": 21, "pos": "D", "goals": 13, + "dob": "19560219", "teams": ["EDM", "VAN"]}`) + idx(`{"name": "Dave Semenko", "jersey": 27, "pos": "LW", "goals": 4, "PIM": 118, + "dob": "19570712", "teams": ["EDM"]}`) + idx(`{"name": "Grant Fuhr", "jersey": 31, "pos": "G", "GAA": 3.91, + "dob": "19620928", "teams": ["EDM", "TOR", "BUF", "STL"]}`) + idx(`{"name": "Andy Moog", "jersey": 35, "pos": "G", "GAA": 3.77, + "dob": "19600218", "teams": ["EDM", "BOS", "DAL", "MTL"]}`) + + // HACK to let the ES magic happen + time.Sleep(time.Second) +} + +func TearDownTestDB(c *Conn) { + c.DeleteIndex("oilers") +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared.go new file mode 100644 index 000000000..697a8adf2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared.go @@ -0,0 +1,18 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +type OneTermQuery struct { + Query struct { + Term string `json:"term"` + } `json:"query"` +} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared_test.go new file mode 100644 index 000000000..2a3d8de41 --- /dev/null +++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared_test.go @@ -0,0 +1,43 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package elastigo
+
+import (
+	"encoding/json"
+	"flag"
+	"log"
+)
+
+var (
+	_        = log.Ldate
+	eshost   *string = flag.String("host", "localhost", "Elasticsearch Server Host Address")
+	logLevel *string = flag.String("logging", "info", "Which log level: [debug,info,warn,error,fatal]")
+)
+
+func GetJson(input interface{}) (map[string]interface{}, error) {
+	var result map[string]interface{}
+	bytes, err := json.Marshal(input)
+
+	if err == nil {
+		err = json.Unmarshal(bytes, &result)
+	}
+
+	return result, err
+}
+
+func HasKey(input map[string]interface{}, key string) bool {
+	if _, ok := input[key]; ok {
+		return true
+	}
+
+	return false
+}
\ No newline at end of file
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/snapshot.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/snapshot.go
new file mode 100644
index 000000000..5b106d193
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/snapshot.go
@@ -0,0 +1,120 @@
+// Copyright 2015 Niels Freier
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package elastigo
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+type GetSnapshotsResponse struct {
+	Snapshots []struct {
+		Snapshot  string    `json:"snapshot"`
+		Indices   []string  `json:"indices"`
+		State     string    `json:"state"`
+		StartTime time.Time `json:"start_time"`
+		EndTime   time.Time `json:"end_time"`
+	} `json:"snapshots"`
+}
+
+// CreateSnapshotRepository creates a new snapshot repository on the cluster
+// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
+func (c *Conn) CreateSnapshotRepository(name string, args map[string]interface{}, settings interface{}) (BaseResponse, error) {
+	var url string
+	var retval BaseResponse
+	url = fmt.Sprintf("/_snapshot/%s", name)
+	body, err := c.DoCommand("POST", url, args, settings)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+
+	return retval, nil
+
+}
+
+// TakeSnapshot takes a snapshot of the current state of the cluster with a specific name and for an existing repository
+// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
+func (c *Conn) TakeSnapshot(repository, name string, args map[string]interface{}, query interface{}) (BaseResponse, error) {
+	var url string
+	var retval BaseResponse
+	url = fmt.Sprintf("/_snapshot/%s/%s", repository, name)
+	body, err := c.DoCommand("PUT", url, args, query)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+
+	return retval, nil
+}
+
+// RestoreSnapshot restores a snapshot of the current state of the cluster with a specific name and for an existing repository
+// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
+func (c *Conn) RestoreSnapshot(repository, name string, args map[string]interface{}, query interface{}) 
(BaseResponse, error) {
+	var url string
+	var retval BaseResponse
+	url = fmt.Sprintf("/_snapshot/%s/%s/_restore", repository, name)
+	body, err := c.DoCommand("POST", url, args, query)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		fmt.Println(string(body))
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+
+	return retval, nil
+}
+
+// GetSnapshotByName returns the snapshot with the specified name for a specific repository
+// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
+func (c *Conn) GetSnapshotByName(repository, name string, args map[string]interface{}) (GetSnapshotsResponse, error) {
+	return c.getSnapshots(repository, name, args)
+}
+
+// GetSnapshots returns all snapshots for a specific repository
+// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
+func (c *Conn) GetSnapshots(repository string, args map[string]interface{}) (GetSnapshotsResponse, error) {
+	return c.getSnapshots(repository, "_all", args)
+}
+
+func (c *Conn) getSnapshots(repository, name string, args map[string]interface{}) (GetSnapshotsResponse, error) {
+	var url string
+	var retval GetSnapshotsResponse
+	url = fmt.Sprintf("/_snapshot/%s/%s", repository, name)
+	body, err := c.DoCommand("GET", url, args, nil)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+
+	return retval, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olebedev/config/LICENSE b/services/templeton/vendor/src/github.com/olebedev/config/LICENSE
new file mode 100644
index 000000000..c09d13e56
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olebedev/config/LICENSE
@@ -0,0 +1,20 @@
+JSON or YAML configuration wrapper with convenient access methods
+Copyright (C) 2014 Oleg Lebedev
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/services/templeton/vendor/src/github.com/olebedev/config/README.md b/services/templeton/vendor/src/github.com/olebedev/config/README.md new file mode 100644 index 000000000..b6454b7c5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olebedev/config/README.md @@ -0,0 +1,17 @@ +# Config [![wercker status](https://app.wercker.com/status/b4e8561d9a711afcb016bf0018e83897/s/ "wercker status")](https://app.wercker.com/project/bykey/b4e8561d9a711afcb016bf0018e83897) [![GoDoc](https://godoc.org/github.com/olebedev/config?status.png)](https://godoc.org/github.com/olebedev/config) + +Package config provides convenient access methods to configuration +stored as JSON or YAML. + +This is a fork of the [original version](https://github.com/moraes/config). +This version extends the functionality of the original without losing compatibility. +Major features added: + +- [`Set(path string, value interface{}) error`](http://godoc.org/github.com/olebedev/config#Config.Set) method +- [`Env() *config.Config`](http://godoc.org/github.com/olebedev/config#Config.Env) method, for OS environment variables parsing +- [`Flag() *config.Config`](http://godoc.org/github.com/olebedev/config#Config.Flag) method, for command line arguments parsing +- [`U*`](https://godoc.org/github.com/olebedev/config#Config.UBool) methods +- [`Copy(...path) (*config.config, error)`](https://godoc.org/github.com/olebedev/config#Config.Copy) method +- [`Extend(*config.Config) (*config.Config, error)`](https://godoc.org/github.com/olebedev/config#Config.Extend) method + +Example and more information you can find [here](http://godoc.org/github.com/olebedev/config). diff --git a/services/templeton/vendor/src/github.com/olebedev/config/config.go b/services/templeton/vendor/src/github.com/olebedev/config/config.go new file mode 100644 index 000000000..fbe1472fc --- /dev/null +++ b/services/templeton/vendor/src/github.com/olebedev/config/config.go @@ -0,0 +1,583 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package config + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "strconv" + "strings" + "syscall" + + "gopkg.in/yaml.v2" +) + +// Config --------------------------------------------------------------------- + +// Config represents a configuration with convenient access methods. +type Config struct { + Root interface{} +} + +// Get returns a nested config according to a dotted path. +func (cfg *Config) Get(path string) (*Config, error) { + n, err := Get(cfg.Root, path) + if err != nil { + return nil, err + } + return &Config{Root: n}, nil +} + +// Set a nested config according to a dotted path. +func (cfg *Config) Set(path string, val interface{}) error { + return Set(cfg.Root, path, val) +} + +// Fetch data from system env, based on existing config keys. +func (cfg *Config) Env() *Config { + keys := getKeys(cfg.Root) + for _, key := range keys { + if val, exist := syscall.Getenv(strings.ToUpper(strings.Join(key, "_"))); exist { + cfg.Set(strings.Join(key, "."), val) + } + } + return cfg +} + +// Parse command line arguments, based on existing config keys. 
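+// Illustrative sketch: a nested key "server.port" becomes the flag
+// -server-port, so an invocation like `myprog -server-port=8080` (binary name
+// hypothetical) overrides that key; only flags actually set on the command
+// line are copied back into the config.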
+func (cfg *Config) Flag() *Config {
+	keys := getKeys(cfg.Root)
+	hash := map[string]*string{}
+	for _, key := range keys {
+		k := strings.Join(key, "-")
+		hash[k] = new(string)
+		val, _ := cfg.String(k)
+		flag.StringVar(hash[k], k, val, "")
+	}
+
+	flag.Parse()
+
+	flag.Visit(func(f *flag.Flag) {
+		name := strings.Replace(f.Name, "-", ".", -1)
+		cfg.Set(name, f.Value.String())
+	})
+
+	return cfg
+}
+
+// getKeys returns all key paths found in the given configuration value.
+func getKeys(source interface{}, base ...string) [][]string {
+	acc := [][]string{}
+	switch c := source.(type) {
+	case map[string]interface{}:
+		for k, v := range c {
+			acc = append(acc, getKeys(v, append(base, k)...)...)
+		}
+	case []interface{}:
+		for i, v := range c {
+			k := strconv.Itoa(i)
+			acc = append(acc, getKeys(v, append(base, k)...)...)
+		}
+	default:
+		acc = append(acc, base)
+	}
+	return acc
+}
+
+// Bool returns a bool according to a dotted path.
+func (cfg *Config) Bool(path string) (bool, error) {
+	n, err := Get(cfg.Root, path)
+	if err != nil {
+		return false, err
+	}
+	switch n := n.(type) {
+	case bool:
+		return n, nil
+	case string:
+		return strconv.ParseBool(n)
+	}
+	return false, typeMismatch("bool or string", n)
+}
+
+// UBool returns a bool according to a dotted path or default value or false.
+func (c *Config) UBool(path string, defaults ...bool) bool {
+	value, err := c.Bool(path)
+
+	if err == nil {
+		return value
+	}
+
+	for _, def := range defaults {
+		return def
+	}
+	return false
+}
+
+// Float64 returns a float64 according to a dotted path.
+func (cfg *Config) Float64(path string) (float64, error) {
+	n, err := Get(cfg.Root, path)
+	if err != nil {
+		return 0, err
+	}
+	switch n := n.(type) {
+	case float64:
+		return n, nil
+	case int:
+		return float64(n), nil
+	case string:
+		return strconv.ParseFloat(n, 64)
+	}
+	return 0, typeMismatch("float64, int or string", n)
+}
+
+// UFloat64 returns a float64 according to a dotted path or default value or 0.
+func (c *Config) UFloat64(path string, defaults ...float64) float64 {
+	value, err := c.Float64(path)
+
+	if err == nil {
+		return value
+	}
+
+	for _, def := range defaults {
+		return def
+	}
+	return float64(0)
+}
+
+// Int returns an int according to a dotted path.
+func (cfg *Config) Int(path string) (int, error) {
+	n, err := Get(cfg.Root, path)
+	if err != nil {
+		return 0, err
+	}
+	switch n := n.(type) {
+	case float64:
+		// encoding/json unmarshals numbers into floats, so we compare
+		// the string representation to see if we can return an int.
+		if i := int(n); fmt.Sprint(i) == fmt.Sprint(n) {
+			return i, nil
+		} else {
+			return 0, fmt.Errorf("Value can't be converted to int: %v", n)
+		}
+	case int:
+		return n, nil
+	case string:
+		if v, err := strconv.ParseInt(n, 10, 0); err == nil {
+			return int(v), nil
+		} else {
+			return 0, err
+		}
+	}
+	return 0, typeMismatch("float64, int or string", n)
+}
+
+// UInt returns an int according to a dotted path or default value or 0.
+func (c *Config) UInt(path string, defaults ...int) int {
+	value, err := c.Int(path)
+
+	if err == nil {
+		return value
+	}
+
+	for _, def := range defaults {
+		return def
+	}
+	return 0
+}
+
+// List returns a []interface{} according to a dotted path.
+func (cfg *Config) List(path string) ([]interface{}, error) {
+	n, err := Get(cfg.Root, path)
+	if err != nil {
+		return nil, err
+	}
+	if value, ok := n.([]interface{}); ok {
+		return value, nil
+	}
+	return nil, typeMismatch("[]interface{}", n)
+}
+
+// UList returns a []interface{} according to a dotted path or defaults or []interface{}.
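+// Like the other U* helpers it never returns an error: a missing or
+// mismatched path yields the first supplied default, or an empty slice.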
+func (c *Config) UList(path string, defaults ...[]interface{}) []interface{} { + value, err := c.List(path) + + if err == nil { + return value + } + + for _, def := range defaults { + return def + } + return make([]interface{}, 0) +} + +// Map returns a map[string]interface{} according to a dotted path. +func (cfg *Config) Map(path string) (map[string]interface{}, error) { + n, err := Get(cfg.Root, path) + if err != nil { + return nil, err + } + if value, ok := n.(map[string]interface{}); ok { + return value, nil + } + return nil, typeMismatch("map[string]interface{}", n) +} + +// UMap returns a map[string]interface{} according to a dotted path or default or map[string]interface{}. +func (c *Config) UMap(path string, defaults ...map[string]interface{}) map[string]interface{} { + value, err := c.Map(path) + + if err == nil { + return value + } + + for _, def := range defaults { + return def + } + return map[string]interface{}{} +} + +// String returns a string according to a dotted path. +func (cfg *Config) String(path string) (string, error) { + n, err := Get(cfg.Root, path) + if err != nil { + return "", err + } + switch n := n.(type) { + case bool, float64, int: + return fmt.Sprint(n), nil + case string: + return n, nil + } + return "", typeMismatch("bool, float64, int or string", n) +} + +// UString returns a string according to a dotted path or default or "". +func (c *Config) UString(path string, defaults ...string) string { + value, err := c.String(path) + + if err == nil { + return value + } + + for _, def := range defaults { + return def + } + return "" +} + +// Copy returns a deep copy with given path or without. +func (c *Config) Copy(dottedPath ...string) (*Config, error) { + toJoin := []string{} + for _, part := range dottedPath { + if len(part) != 0 { + toJoin = append(toJoin, part) + } + } + + var err error + var path = strings.Join(toJoin, ".") + var cfg = c + var root = "" + + if len(path) > 0 { + if cfg, err = c.Get(path); err != nil { + return nil, err + } + } + + if root, err = RenderYaml(cfg.Root); err != nil { + return nil, err + } + return ParseYaml(root) +} + +// Extend returns extended copy of current config with applied +// values from the given config instance. Note that if you extend +// with different structure you will get an error. See: `.Set()` method +// for details. +func (c *Config) Extend(cfg *Config) (*Config, error) { + n, err := c.Copy() + if err != nil { + return nil, err + } + + keys := getKeys(cfg.Root) + for _, key := range keys { + k := strings.Join(key, ".") + i, err := Get(cfg.Root, k) + if err != nil { + return nil, err + } + if err := n.Set(k, i); err != nil { + return nil, err + } + } + return n, nil +} + +// typeMismatch returns an error for an expected type. +func typeMismatch(expected string, got interface{}) error { + return fmt.Errorf("Type mismatch: expected %s; got %T", expected, got) +} + +// Fetching ------------------------------------------------------------------- + +// Get returns a child of the given value according to a dotted path. +func Get(cfg interface{}, path string) (interface{}, error) { + parts := strings.Split(path, ".") + // Normalize path. + for k, v := range parts { + if v == "" { + if k == 0 { + parts = parts[1:] + } else { + return nil, fmt.Errorf("Invalid path %q", path) + } + } + } + // Get the value. 
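+	// Walk the path one segment at a time: numeric segments index into
+	// []interface{} values, and any other segment is looked up as a map key.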
+	for pos, part := range parts {
+		switch c := cfg.(type) {
+		case []interface{}:
+			if i, error := strconv.ParseInt(part, 10, 0); error == nil {
+				if int(i) < len(c) {
+					cfg = c[i]
+				} else {
+					return nil, fmt.Errorf(
+						"Index out of range at %q: list has only %v items",
+						strings.Join(parts[:pos+1], "."), len(c))
+				}
+			} else {
+				return nil, fmt.Errorf("Invalid list index at %q",
+					strings.Join(parts[:pos+1], "."))
+			}
+		case map[string]interface{}:
+			if value, ok := c[part]; ok {
+				cfg = value
+			} else {
+				return nil, fmt.Errorf("Nonexistent map key at %q",
+					strings.Join(parts[:pos+1], "."))
+			}
+		default:
+			return nil, fmt.Errorf(
+				"Invalid type at %q: expected []interface{} or map[string]interface{}; got %T",
+				strings.Join(parts[:pos+1], "."), cfg)
+		}
+	}
+
+	return cfg, nil
+}
+
+// Set assigns a value according to a dotted path. It returns an error when
+// the path cannot be created or does not match the structure of the existing
+// configuration (for example, a non-numeric index into a list).
+func Set(cfg interface{}, path string, value interface{}) error {
+	parts := strings.Split(path, ".")
+	// Normalize path.
+	for k, v := range parts {
+		if v == "" {
+			if k == 0 {
+				parts = parts[1:]
+			} else {
+				return fmt.Errorf("Invalid path %q", path)
+			}
+		}
+	}
+
+	point := &cfg
+	for pos, part := range parts {
+		switch c := (*point).(type) {
+		case []interface{}:
+			if i, error := strconv.ParseInt(part, 10, 0); error == nil {
+				// 1. normalize slice capacity
+				if int(i) >= cap(c) {
+					c = append(c, make([]interface{}, int(i)-cap(c)+1, int(i)-cap(c)+1)...)
+				}
+
+				// 2. set value or go further
+				if pos+1 == len(parts) {
+					c[i] = value
+				} else {
+
+					// if exists just pick the pointer
+					if va := c[i]; va != nil {
+						point = &va
+					} else {
+						// is next part slice or map?
+						if i, err := strconv.ParseInt(parts[pos+1], 10, 0); err == nil {
+							va = make([]interface{}, int(i)+1, int(i)+1)
+						} else {
+							va = make(map[string]interface{})
+						}
+						c[i] = va
+						point = &va
+					}
+
+				}
+
+			} else {
+				return fmt.Errorf("Invalid list index at %q",
+					strings.Join(parts[:pos+1], "."))
+			}
+		case map[string]interface{}:
+			if pos+1 == len(parts) {
+				c[part] = value
+			} else {
+				// if exists just pick the pointer
+				if va, ok := c[part]; ok {
+					point = &va
+				} else {
+					// is next part slice or map?
+					if i, err := strconv.ParseInt(parts[pos+1], 10, 0); err == nil {
+						va = make([]interface{}, int(i)+1, int(i)+1)
+					} else {
+						va = make(map[string]interface{})
+					}
+					c[part] = va
+					point = &va
+				}
+			}
+		default:
+			return fmt.Errorf(
+				"Invalid type at %q: expected []interface{} or map[string]interface{}; got %T",
+				strings.Join(parts[:pos+1], "."), cfg)
+		}
+	}
+
+	return nil
+}
+
+// Parsing --------------------------------------------------------------------
+
+// Must is a wrapper for parsing functions to be used during initialization.
+// It panics on failure.
+func Must(cfg *Config, err error) *Config {
+	if err != nil {
+		panic(err)
+	}
+	return cfg
+}
+
+// normalizeValue normalizes an unmarshalled value. This is needed because
+// encoding/json doesn't support marshalling map[interface{}]interface{}.
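+// yaml.v2 unmarshals mappings as map[interface{}]interface{}, so keys are
+// coerced to strings (non-string keys are rejected) and values are
+// normalized recursively.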
+func normalizeValue(value interface{}) (interface{}, error) { + switch value := value.(type) { + case map[interface{}]interface{}: + node := make(map[string]interface{}, len(value)) + for k, v := range value { + key, ok := k.(string) + if !ok { + return nil, fmt.Errorf("Unsupported map key: %#v", k) + } + item, err := normalizeValue(v) + if err != nil { + return nil, fmt.Errorf("Unsupported map value: %#v", v) + } + node[key] = item + } + return node, nil + case map[string]interface{}: + node := make(map[string]interface{}, len(value)) + for key, v := range value { + item, err := normalizeValue(v) + if err != nil { + return nil, fmt.Errorf("Unsupported map value: %#v", v) + } + node[key] = item + } + return node, nil + case []interface{}: + node := make([]interface{}, len(value)) + for key, v := range value { + item, err := normalizeValue(v) + if err != nil { + return nil, fmt.Errorf("Unsupported list item: %#v", v) + } + node[key] = item + } + return node, nil + case bool, float64, int, string, nil: + return value, nil + } + return nil, fmt.Errorf("Unsupported type: %T", value) +} + +// JSON ----------------------------------------------------------------------- + +// ParseJson reads a JSON configuration from the given string. +func ParseJson(cfg string) (*Config, error) { + return parseJson([]byte(cfg)) +} + +// ParseJsonFile reads a JSON configuration from the given filename. +func ParseJsonFile(filename string) (*Config, error) { + cfg, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return parseJson(cfg) +} + +// parseJson performs the real JSON parsing. +func parseJson(cfg []byte) (*Config, error) { + var out interface{} + var err error + if err = json.Unmarshal(cfg, &out); err != nil { + return nil, err + } + if out, err = normalizeValue(out); err != nil { + return nil, err + } + return &Config{Root: out}, nil +} + +// RenderJson renders a JSON configuration. +func RenderJson(cfg interface{}) (string, error) { + b, err := json.Marshal(cfg) + if err != nil { + return "", err + } + return string(b), nil +} + +// YAML ----------------------------------------------------------------------- + +// ParseYaml reads a YAML configuration from the given string. +func ParseYaml(cfg string) (*Config, error) { + return parseYaml([]byte(cfg)) +} + +// ParseYamlFile reads a YAML configuration from the given filename. +func ParseYamlFile(filename string) (*Config, error) { + cfg, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return parseYaml(cfg) +} + +// parseYaml performs the real YAML parsing. +func parseYaml(cfg []byte) (*Config, error) { + var out interface{} + var err error + if err = yaml.Unmarshal(cfg, &out); err != nil { + return nil, err + } + if out, err = normalizeValue(out); err != nil { + return nil, err + } + return &Config{Root: out}, nil +} + +// RenderYaml renders a YAML configuration. +func RenderYaml(cfg interface{}) (string, error) { + b, err := yaml.Marshal(cfg) + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/services/templeton/vendor/src/github.com/olebedev/config/config_test.go b/services/templeton/vendor/src/github.com/olebedev/config/config_test.go new file mode 100644 index 000000000..ae4f79f4e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olebedev/config/config_test.go @@ -0,0 +1,486 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package config + +import ( + "os" + "reflect" + "testing" +) + +var yamlString = ` +map: + key0: true + key1: false + key2: "true" + key3: "false" + key4: 4.2 + key5: "4.2" + key6: 42 + key7: "42" + key8: value8 +list: + - true + - false + - "true" + - "false" + - 4.3 + - "4.3" + - 43 + - "43" + - item8 +config: + server: + - www.google.com + - www.cnn.com + - www.example.com + admin: + - username: calvin + password: yukon + - username: hobbes + password: tuna +messages: + - | + Welcome + + back! + - > + Farewell, + + my friend! +` + +var configTests = []struct { + path string + kind string + want interface{} + ok bool +}{ + // ok + {"map.key0", "Bool", true, true}, + {"map.key0", "String", "true", true}, + // bad + {"map.key0.foo", "Bool", "", false}, + {"map.key0", "Float64", "", false}, + {"map.key0", "Int", "", false}, + // ok + {"map.key1", "Bool", false, true}, + {"map.key1", "String", "false", true}, + // bad + {"map.key1", "Float64", "", false}, + {"map.key1", "Int", "", false}, + // ok + {"map.key2", "Bool", true, true}, + {"map.key2", "String", "true", true}, + // bad + {"map.key2", "Float64", "", false}, + {"map.key2", "Int", "", false}, + // ok + {"map.key3", "Bool", false, true}, + {"map.key3", "String", "false", true}, + // bad + {"map.key3", "Float64", "", false}, + {"map.key3", "Int", "", false}, + // ok + {"map.key4", "Float64", 4.2, true}, + {"map.key4", "String", "4.2", true}, + // bad + {"map.key4", "Bool", "", false}, + {"map.key4", "Int", "", false}, + // ok + {"map.key5", "Float64", 4.2, true}, + {"map.key5", "String", "4.2", true}, + // bad + {"map.key5", "Bool", "", false}, + {"map.key5", "Int", "", false}, + // ok + {"map.key6", "Float64", float64(42), true}, + {"map.key6", "Int", 42, true}, + {"map.key6", "String", "42", true}, + // bad + {"map.key6", "Bool", "", false}, + // ok + {"map.key7", "Float64", float64(42), true}, + {"map.key7", "Int", 42, true}, + {"map.key7", "String", "42", true}, + // bad + {"map.key7", "Bool", "", false}, + // ok + {"map.key8", "String", "value8", true}, + // bad + {"map.key8", "Bool", "", false}, + {"map.key8", "Float64", "", false}, + {"map.key8", "Int", "", false}, + // bad + {"map.key9", "Bool", "", false}, + {"map.key9", "Float64", "", false}, + {"map.key9", "Int", "", false}, + {"map.key9", "String", "", false}, + + // ok + {"list.0", "Bool", true, true}, + {"list.0", "String", "true", true}, + // bad + {"list.0", "Float64", "", false}, + {"list.0", "Int", "", false}, + // ok + {"list.1", "Bool", false, true}, + {"list.1", "String", "false", true}, + // bad + {"list.1", "Float64", "", false}, + {"list.1", "Int", "", false}, + // ok + {"list.2", "Bool", true, true}, + {"list.2", "String", "true", true}, + // bad + {"list.2", "Float64", "", false}, + {"list.2", "Int", "", false}, + // ok + {"list.3", "Bool", false, true}, + {"list.3", "String", "false", true}, + // bad + {"list.3", "Float64", "", false}, + {"list.3", "Int", "", false}, + // ok + {"list.4", "Float64", 4.3, true}, + {"list.4", "String", "4.3", true}, + // bad + {"list.4", "Bool", "", false}, + {"list.4", "Int", "", false}, + // ok + {"list.5", "Float64", 4.3, true}, + {"list.5", "String", "4.3", true}, + // bad + {"list.5", "Bool", "", false}, + {"list.5", "Int", "", false}, + // ok + {"list.6", "Float64", float64(43), true}, + {"list.6", "Int", 43, true}, + {"list.6", "String", "43", true}, + // bad + {"list.6", "Bool", "", false}, + // ok + {"list.7", "Float64", float64(43), true}, + {"list.7", "Int", 43, true}, + {"list.7", "String", "43", true}, + // bad + 
{"list.7", "Bool", "", false}, + // ok + {"list.8", "String", "item8", true}, + // bad + {"list.8", "Bool", "", false}, + {"list.8", "Float64", "", false}, + {"list.8", "Int", "", false}, + // bad + {"list.9", "Bool", "", false}, + {"list.9", "Float64", "", false}, + {"list.9", "Int", "", false}, + {"list.9", "String", "", false}, + + // ok + {"config.server.0", "String", "www.google.com", true}, + {"config.server.1", "String", "www.cnn.com", true}, + {"config.server.2", "String", "www.example.com", true}, + // bad + {"config.server.3", "Bool", "", false}, + {"config.server.3", "Float64", "", false}, + {"config.server.3", "Int", "", false}, + {"config.server.3", "String", "", false}, + + // ok + {"config.admin.0.username", "String", "calvin", true}, + {"config.admin.0.password", "String", "yukon", true}, + {"config.admin.1.username", "String", "hobbes", true}, + {"config.admin.1.password", "String", "tuna", true}, + // bad + {"config.admin.0.country", "Bool", "", false}, + {"config.admin.0.country", "Float64", "", false}, + {"config.admin.0.country", "Int", "", false}, + {"config.admin.0.country", "String", "", false}, + + // ok + {"messages.0", "String", "Welcome\n\nback!\n", true}, + {"messages.1", "String", "Farewell,\nmy friend!\n", true}, + // bad + {"messages.2", "Bool", "", false}, + {"messages.2", "Float64", "", false}, + {"messages.2", "Int", "", false}, + {"messages.2", "String", "", false}, + + // ok + {"config.server", "List", []interface{}{"www.google.com", "www.cnn.com", "www.example.com"}, true}, + {"config.admin.0", "Map", map[string]interface{}{"username": "calvin", "password": "yukon"}, true}, + {"config.admin.1", "Map", map[string]interface{}{"username": "hobbes", "password": "tuna"}, true}, +} + +func TestYamlConfig(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + str, err := RenderYaml(cfg.Root) + if err != nil { + t.Fatal(err) + } + cfg, err = ParseYaml(str) + if err != nil { + t.Fatal(err) + } + testConfig(t, cfg) +} + +func TestJsonConfig(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + str, err := RenderJson(cfg.Root) + if err != nil { + t.Fatal(err) + } + cfg, err = ParseJson(str) + if err != nil { + t.Fatal(err) + } + testConfig(t, cfg) +} + +func TestSet(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + val := "test" + err = cfg.Set("map.key8", val) + if v, _ := cfg.String("map.key8"); v != val { + t.Errorf(`%s(%T) != "%s(%T)"`, v, v, val, val) + } +} + +func TestSetUnexistingValue(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + + val := "test" + + err = cfg.Set("some.one", val) + v, _ := cfg.String("some.one") + expect(t, v, val) + + err = cfg.Set("some.thing.10", val) + v, _ = cfg.String("some.thing.10") + expect(t, v, val) + // try to set by string key into slice + expect(t, cfg.Set("some.thing.more", val) != nil, true) +} + +func TestEnv(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + val := "test" + cfg.Set("map.key8", val) + os.Setenv("MAP_KEY8", val) + cfg.Env() + test, _ := cfg.String("map.key8") + if test != val { + t.Errorf(`"%s" != "%s"`, test, val) + } +} + +func TestUMethods(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + + // UString + expect(t, cfg.UString("map.key8"), "value8") + expect(t, cfg.UString("map.key8", "default"), "value8") + expect(t, cfg.UString("map.undefined", "default"), "default") 
+ expect(t, cfg.UString("map.undefined"), "") + + // UBool + expect(t, cfg.UBool("map.key0"), true) + expect(t, cfg.UBool("map.key0", false), true) + expect(t, cfg.UBool("map.undefined", true), true) + expect(t, cfg.UBool("map.undefined"), false) + + // UFloat64 + expect(t, cfg.UFloat64("map.key4"), float64(4.2)) + expect(t, cfg.UFloat64("map.key4", float64(1)), float64(4.2)) + expect(t, cfg.UFloat64("map.undefined", float64(0.99)), float64(0.99)) + expect(t, cfg.UFloat64("map.undefined"), float64(0)) + + // UInt + expect(t, cfg.UInt("map.key6"), 42) + expect(t, cfg.UInt("map.key6", 37), 42) + expect(t, cfg.UInt("map.undefined", 37), 37) + expect(t, cfg.UInt("map.undefined"), 0) + +} + +func TestCopy(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + + cfg2, err := cfg.Copy() + expect(t, err, nil) + cfg2.Set("map.key6", 43) + + yaml1, _ := RenderYaml(cfg.Root) + yaml2, _ := RenderYaml(cfg2.Root) + + expect(t, yaml2 == yaml1, false) + + cfg3, err := cfg.Copy("config", "server") + expect(t, err, nil) + cfg4, err := cfg.Copy("config.server") + expect(t, err, nil) + + expect(t, cfg3.UString("0"), "www.google.com") + expect(t, cfg4.UString("0"), "www.google.com") + + yaml3, _ := RenderYaml(cfg3.Root) + yaml4, _ := RenderYaml(cfg4.Root) + expect(t, yaml3, yaml4) +} + +func TestExtendError(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + cfg2, err := ParseYaml(` +list: + key0: true +map: + - true +`) + var nilCfg *Config + extended, err := cfg.Extend(cfg2) + expect(t, extended, nilCfg) + expect(t, err.Error(), "Invalid list index at \"list.key0\"") +} + +func TestExtend(t *testing.T) { + cfg, err := ParseYaml(yamlString) + if err != nil { + t.Fatal(err) + } + cfg2, err := ParseYaml(` +map: + key0: extend +list: + - extend +`) + + extended, err := cfg.Extend(cfg2) + expect(t, err, nil) + // immutable + expect(t, cfg.UBool("map.key0"), true) + expect(t, cfg.UBool("list.0"), true) + + expect(t, cfg2.UString("map.key8", "not found"), "not found") + expect(t, cfg2.UInt("list.8", 7), 7) + + // result + expect(t, extended.UString("map.key0"), "extend") + expect(t, extended.UString("map.key8"), "value8") + expect(t, extended.UString("list.0"), "extend") + expect(t, extended.UString("list.8"), "item8") +} + +func testConfig(t *testing.T, cfg *Config) { +Loop: + for _, test := range configTests { + var got interface{} + var err error + switch test.kind { + case "Bool": + got, err = cfg.Bool(test.path) + case "Float64": + got, err = cfg.Float64(test.path) + case "Int": + got, err = cfg.Int(test.path) + case "List": + got, err = cfg.List(test.path) + case "Map": + got, err = cfg.Map(test.path) + case "String": + got, err = cfg.String(test.path) + default: + t.Errorf("Unsupported kind %q", test.kind) + continue Loop + } + if test.ok { + if err != nil { + t.Errorf(`%s(%q) = "%v", got error: %v`, test.kind, test.path, test.want, err) + } else { + ok := false + switch test.kind { + case "List": + ok = equalList(got, test.want) + case "Map": + ok = equalMap(got, test.want) + default: + ok = got == test.want + } + if !ok { + t.Errorf(`%s(%q) = "%v", want "%v"`, test.kind, test.path, test.want, got) + } + } + } else { + if err == nil { + t.Errorf("%s(%q): expected error", test.kind, test.path) + } + } + } +} + +func equalList(l1, l2 interface{}) bool { + v1, ok1 := l1.([]interface{}) + v2, ok2 := l2.([]interface{}) + if !ok1 || !ok2 { + return false + } + if len(v1) != len(v2) { + return false + } + for k, v := range v1 { + if v2[k] 
!= v { + return false + } + } + return true +} + +func equalMap(m1, m2 interface{}) bool { + v1, ok1 := m1.(map[string]interface{}) + v2, ok2 := m2.(map[string]interface{}) + if !ok1 || !ok2 { + return false + } + if len(v1) != len(v2) { + return false + } + for k, v := range v1 { + if v2[k] != v { + return false + } + } + return true +} + +func expect(t *testing.T, a interface{}, b interface{}) { + if a != b { + t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} diff --git a/services/templeton/vendor/src/github.com/olebedev/config/doc.go b/services/templeton/vendor/src/github.com/olebedev/config/doc.go new file mode 100644 index 000000000..13c63b156 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olebedev/config/doc.go @@ -0,0 +1,139 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package config provides convenient access methods to configuration stored as +JSON or YAML. + +Let's start with a simple YAML example: + + development: + database: + host: localhost + users: + - name: calvin + password: yukon + - name: hobbes + password: tuna + production: + database: + host: 192.168.1.1 + +We can parse it using ParseYaml(), which will return a *Config instance on +success: + + cfg, err := config.ParseYaml(yamlString) + +An equivalent JSON configuration could be built using ParseJson(): + + cfg, err := config.ParseJson(jsonString) + +From now, we can retrieve configuration values using a path in dotted notation: + + // "localhost" + host, err := cfg.String("development.database.host") + + // or... + + // "192.168.1.1" + host, err := cfg.String("production.database.host") + +Besides String(), other types can be fetched directly: Bool(), Float64(), +Int(), Map() and List(). All these methods will return an error if the path +doesn't exist, or the value doesn't match or can't be converted to the +requested type. + +A nested configuration can be fetched using Get(). Here we get a new *Config +instance with a subset of the configuration: + + cfg, err := cfg.Get("development") + +Then the inner values are fetched relatively to the subset: + + // "localhost" + host, err := cfg.String("database.host") + +For lists, the dotted path must use an index to refer to a specific value. +To retrieve the information from a user stored in the configuration above: + + // map[string]interface{}{ ... } + user1, err := cfg.Map("development.users.0") + // map[string]interface{}{ ... } + user2, err := cfg.Map("development.users.1") + + // or... + + // "calvin" + name1, err := cfg.String("development.users.0.name") + // "hobbes" + name2, err := cfg.String("development.users.1.name") + +JSON or YAML strings can be created calling the appropriate Render*() +functions. Here's how we render a configuration like the one used in these +examples: + + cfg := map[string]interface{}{ + "development": map[string]interface{}{ + "database": map[string]interface{}{ + "host": "localhost", + }, + "users": []interface{}{ + map[string]interface{}{ + "name": "calvin", + "password": "yukon", + }, + map[string]interface{}{ + "name": "hobbes", + "password": "tuna", + }, + }, + }, + "production": map[string]interface{}{ + "database": map[string]interface{}{ + "host": "192.168.1.1", + }, + }, + } + + json, err := config.RenderJson(cfg) + + // or... 
+
+	yaml, err := config.RenderYaml(cfg)
+
+This results in a configuration string that can be stored in a file or database.
+
+For more convenience, the package can also parse OS environment variables and
+command line arguments:
+
+	cfg, err := config.ParseYaml(yamlString)
+	cfg.Env()
+
+	// or
+
+	cfg.Flag()
+
+We can also specify the order of parsing:
+
+	cfg.Env().Flag()
+
+	// or
+
+	cfg.Flag().Env()
+
+When the OS environment is parsed, every key that exists at that moment is
+looked up in the environment, uppercased and with `_` as the separator instead
+of `.`. For flags the separator is `-`; otherwise command line arguments use
+the regular dotted notation for all keys. Run the application with `-h` to see
+the available keys.
+
+The "unsafe" methods are named like the regular ones but with a `U` prefix;
+they never return an error:
+
+	// ""
+	cfg.UString("undefined.key")
+
+	// or with default value
+	unsafeValue := cfg.UString("undefined.key", "default value")
+*/
+package config
diff --git a/services/templeton/vendor/src/github.com/olebedev/config/wercker.yml b/services/templeton/vendor/src/github.com/olebedev/config/wercker.yml
new file mode 100644
index 000000000..4a47bb173
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olebedev/config/wercker.yml
@@ -0,0 +1 @@
+box: wercker/golang@1.1.1
diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/LICENSE b/services/templeton/vendor/src/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 000000000..a68e67f01
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,188 @@
+
+Copyright (c) 2011-2014 - Canonical Inc.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+ + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/LICENSE.libyaml b/services/templeton/vendor/src/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/README.md b/services/templeton/vendor/src/gopkg.in/yaml.v2/README.md new file mode 100644 index 000000000..7b8bd8670 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! 
+b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/apic.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/apic.go new file mode 100644 index 000000000..95ec014e8 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. 
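+// Panics if an encoding was already set.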
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/decode.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/decode.go new file mode 100644 index 000000000..085cddc44 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
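+//
+// Orientation sketch (a hedged illustration; it uses only the unexported
+// helpers defined below): parsing a small document yields a documentNode
+// whose children mirror the mapping/sequence/scalar structure, e.g.
+//
+//	p := newParser([]byte("a: [1, 2]"))
+//	defer p.destroy()
+//	root := p.parse() // documentNode; root.children[0] is a mappingNode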
+
+type parser struct {
+	parser yaml_parser_t
+	event  yaml_event_t
+	doc    *node
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+
+	yaml_parser_set_input_string(&p.parser, b)
+
+	p.skip()
+	if p.event.typ != yaml_STREAM_START_EVENT {
+		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return &p
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+	if p.event.typ != yaml_NO_EVENT {
+		if p.event.typ == yaml_STREAM_END_EVENT {
+			failf("attempted to go past the end of stream; corrupted value?")
+		}
+		yaml_event_delete(&p.event)
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	switch p.event.typ {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+	}
+	panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.skip()
+	n.children = append(n.children, p.parse())
+	if p.event.typ != yaml_DOCUMENT_END_EVENT {
+		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.skip()
+	return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
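+//
+// End-to-end sketch (hedged; Unmarshal is the package's exported entry
+// point, defined elsewhere in this package, which drives the parser above
+// and the decoder below):
+//
+//	var v struct{ A string }
+//	err := Unmarshal([]byte("a: hello"), &v) // v.A == "hello", err == nil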
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, 
zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if s, ok := resolved.(string); ok && out.CanAddr() {
+		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+			err := u.UnmarshalText([]byte(s))
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			good = true
+		} else if resolved != nil {
+			out.SetString(n.value)
+			good = true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		good = true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				good = true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					good = true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			good = true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			good = true
+		case int64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case float64:
+			out.SetFloat(resolved)
+			good = true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/decode_test.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/decode_test.go new file mode 100644 index 000000000..c159760b6 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/decode_test.go @@ -0,0 +1,988 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. + { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. 
+ + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, 
+ }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. + { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + // Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + }, + + // Encode empty lists as zero-length slices. 
+ { + "a: []", + &struct{ A []int }{[]int{}}, + }, + + // UTF-16-LE + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00", + M{"ñoño": "very yes"}, + }, + // UTF-16-LE with surrogate. + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00", + M{"ñoño": "very yes 🟔"}, + }, + + // UTF-16-BE + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n", + M{"ñoño": "very yes"}, + }, + // UTF-16-BE with surrogate. + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n", + M{"ñoño": "very yes 🟔"}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for _, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct 
{ + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = 
[]int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 10 + label: center/big + +mergeOne: + # Merge one map + << : *CENTER + "r": 10 + label: center/big + +mergeMultiple: + # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + +override: + # Override + << : [ *BIG, *LEFT, *SMALL ] + "x": 1 + label: center/big + +shortTag: + # Explicit short merge tag + !!merge "<<" : [ *CENTER, *BIG ] + label: center/big + +longTag: + # Explicit merge long tag + ! "<<" : [ *CENTER, *BIG ] + label: center/big + +inlineMap: + # Inlined map + << : {"x": 1, "y": 2, "r": 10} + label: center/big + +inlineSequenceMap: + # Inlined map in sequence + << : [ *CENTER, {"r": 10} ] + label: center/big +` + +func (s *S) TestMerge(c *C) { + var want = map[interface{}]interface{}{ + "x": 1, + "y": 2, + "r": 10, + "label": "center/big", + } + + var m map[interface{}]interface{} + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) + } +} + +func (s *S) TestMergeStruct(c *C) { + type Data struct { + X, Y, R int + Label string + } + want := Data{1, 2, 10, "center/big"} + + var m map[string]Data + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, Equals, want, Commentf("test %q failed", name)) + } +} + +var unmarshalNullTests = []func() interface{}{ + func() interface{} { var v interface{}; v = "v"; return &v }, + func() interface{} { var s = "s"; return &s }, + func() interface{} { var s = "s"; sptr := &s; return &sptr }, + func() interface{} { var i = 1; return &i }, + func() interface{} { var i = 1; iptr := &i; return &iptr }, + func() interface{} { m := map[string]int{"s": 1}; return &m }, + func() interface{} { m := map[string]int{"s": 1}; return m }, +} + +func (s *S) TestUnmarshalNull(c *C) { + for _, test := range unmarshalNullTests { + item := test() + zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() + err := yaml.Unmarshal([]byte("null"), item) + c.Assert(err, IsNil) + if reflect.TypeOf(item).Kind() == reflect.Map { + c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) + } else { + c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) + } + } +} + +func (s *S) TestUnmarshalSliceOnPreset(c *C) { + // Issue #48. 
+ v := struct{ A []int }{[]int{1}} + yaml.Unmarshal([]byte("a: [2]"), &v) + c.Assert(v.A, DeepEquals, []int{2}) +} + +//var data []byte +//func init() { +// var err error +// data, err = ioutil.ReadFile("/tmp/file.yaml") +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkUnmarshal(c *C) { +// var err error +// for i := 0; i < c.N; i++ { +// var v map[string]interface{} +// err = yaml.Unmarshal(data, &v) +// } +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkMarshal(c *C) { +// var v map[string]interface{} +// yaml.Unmarshal(data, &v) +// c.ResetTimer() +// for i := 0; i < c.N; i++ { +// yaml.Marshal(&v) +// } +//} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/emitterc.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 000000000..2befd553e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
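+//
+// Call-pattern sketch (hedged; the yaml_stream_start_event_initialize
+// constructor is assumed from elsewhere in this package): producers hand
+// over one yaml_event_t at a time, and actual output may be deferred until
+// enough lookahead has accumulated (see yaml_emitter_need_more_events).
+//
+//	var event yaml_event_t
+//	yaml_stream_start_event_initialize(&event, yaml_UTF8_ENCODING)
+//	if !yaml_emitter_emit(&emitter, &event) {
+//		// on failure, emitter.problem describes what went wrong
+//	}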
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
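+//
+// For a stream holding a single scalar document, the dispatcher below is
+// entered with, in order: yaml_EMIT_STREAM_START_STATE,
+// yaml_EMIT_FIRST_DOCUMENT_START_STATE, yaml_EMIT_DOCUMENT_CONTENT_STATE,
+// yaml_EMIT_DOCUMENT_END_STATE, yaml_EMIT_DOCUMENT_START_STATE again, and
+// yaml_EMIT_END_STATE once STREAM-END arrives.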
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
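+//
+// Output sketch (approximate; the %TAG handle/prefix pair is a made-up
+// example): an explicit, non-implicit document start with directives
+// renders roughly as
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2016:
+//	---
+//
+// whereas an implicit start of a plain first document writes nothing here.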
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+	if event.typ == yaml_DOCUMENT_START_EVENT {
+
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(event.tag_directives); i++ {
+			tag_directive := &event.tag_directives[i]
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(default_tag_directives); i++ {
+			tag_directive := &default_tag_directives[i]
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+				return false
+			}
+		}
+
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := 0; i < len(event.tag_directives); i++ {
+				tag_directive := &event.tag_directives[i]
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+			if emitter.canonical {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+		return true
+	}
+
+	if event.typ == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+		emitter.state = yaml_EMIT_END_STATE
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
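+//
+// Rendering note (approximate): a simple flow pair comes out as
+// "{key: value}"; a non-simple key was already introduced with "?" by the
+// key handler above, giving roughly "{? key : value}".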
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
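+//
+// The handler below first records the surrounding context flags (root,
+// sequence, mapping, simple key) and then dispatches on the event type:
+// ALIAS, SCALAR, SEQUENCE-START, and MAPPING-START each have a dedicated
+// expect function; any other event is an emitter error.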
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
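+//
+// [editor's note, illustrative only; not upstream text] The emitter keeps a
+// short queue of pending events (events/events_head), which is what makes
+// this one-event lookahead possible: a MAPPING-START immediately followed by
+// MAPPING-END can be emitted inline as
+//
+//     key: {}
+//
+// rather than opening a block mapping with no entries.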
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
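+//
+// [editor's note, illustrative only; not upstream text] A tag whose prefix
+// matched a %TAG directive in yaml_emitter_analyze_tag below is written as
+// handle plus suffix (e.g. handle "!!" and suffix "str" produce "!!str");
+// a tag with no matching handle falls back to the verbatim form, e.g.
+//
+//     !<tag:yaml.org,2002:str> value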
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
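+//
+// [editor's note, illustrative only; not upstream text] Anchors and aliases
+// share the same rule: non-empty and alphanumeric only (per is_alpha). So
+//
+//     base: &defaults {a: 1}
+//     copy: *defaults
+//
+// passes, while an anchor like "two words" is rejected with an emitter
+// error before anything is written.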
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
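+//
+// [editor's note; not upstream text] This fills the emitter's scratch data
+// (anchor_data, tag_data, scalar_data) that the process_anchor, process_tag
+// and process_scalar writers consume later: ALIAS events analyze only the
+// anchor, SCALAR events analyze anchor, tag and value, and the two *-START
+// events analyze anchor and tag.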
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/encode.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/encode.go new file mode 100644 index 000000000..84f849955 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
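+ // [editor's note; not upstream text] Note that the check below only
+ // escalates emit failures through e.must for event types other than
+ // DOCUMENT-END and STREAM-END; failures while closing the stream are
+ // silently ignored.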
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/encode_test.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/encode_test.go new file mode 100644 index 000000000..84099bd38 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/encode_test.go @@ -0,0 +1,501 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
+ { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == 
nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/parserc.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 000000000..0a7037ad1 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. 
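+//
+// [editor's note; not upstream text] Both error setters record the failure
+// on the parser and return false, so call sites can abort with a single
+// "return yaml_parser_set_parser_error(...)" expression; the _context
+// variant additionally records where the enclosing construct began
+// (context and context_mark).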
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
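+// [Editor's note] Empty scalars back omitted values such as the mapping value
+// in "key:"; through the exported API they decode to nil. Illustrative use
+// from a client package (assuming the exported Unmarshal entry point):
+//
+//	var m map[string]interface{}
+//	_ = yaml.Unmarshal([]byte("key:"), &m) // m["key"] == nil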
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/readerc.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 000000000..f45079171 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
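+// [Editor's sketch] A standalone equivalent of the detection below, assuming
+// only these three BOMs matter (the helper name is hypothetical and the
+// standard library "bytes" package would be needed):
+//
+//	func sniffEncoding(b []byte) string {
+//		switch {
+//		case bytes.HasPrefix(b, []byte(bom_UTF16LE)):
+//			return "UTF-16LE"
+//		case bytes.HasPrefix(b, []byte(bom_UTF16BE)):
+//			return "UTF-16BE"
+//		case bytes.HasPrefix(b, []byte(bom_UTF8)):
+//			return "UTF-8 (BOM)"
+//		}
+//		return "UTF-8 (assumed)"
+//	}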
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
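+	// [Editor's note] parser.unread counts decoded characters (not bytes)
+	// remaining in parser.buffer; each character appended by the loop below
+	// bumps it once, until at least `length` characters are available.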
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/resolve.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 000000000..93a863274 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
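+		// [Editor's note] e.g. a plain "yes", "on", or "true" resolves to
+		// !!bool true; "", "~", and "null" to !!null nil; ".inf" to
+		// !!float +Inf, per the resolveMapList table built in init above.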
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			floatv, err := strconv.ParseFloat(plain, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/scannerc.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 000000000..d97d76fa5
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
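+//
+// [Editor's note] In this Go port the two steps surface as yaml_parser_scan
+// (tokens) and yaml_parser_parse (events); both are driven internally by the
+// package's exported API (e.g. Unmarshal) rather than called by users directly.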
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !       !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A tagged scalar:
+//
+//      !!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      TAG("!!","float")
+//      SCALAR("3.14",double-quoted)
+//      STREAM-END
+//
+// 3. Various scalar styles:
+//
+//      --- # Implicit empty plain scalars do not produce tokens.
+//      --- a plain scalar
+//      --- 'a single-quoted scalar'
+//      --- "a double-quoted scalar"
+//      --- |-
+//        a literal scalar
+//      --- >-
+//        a folded
+//        scalar
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      DOCUMENT-START
+//      SCALAR("a plain scalar",plain)
+//      DOCUMENT-START
+//      SCALAR("a single-quoted scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("a double-quoted scalar",double-quoted)
+//      DOCUMENT-START
+//      SCALAR("a literal scalar",literal)
+//      DOCUMENT-START
+//      SCALAR("a folded scalar",folded)
+//      STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//      [item 1, item 2, item 3]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-SEQUENCE-START
+//      SCALAR("item 1",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 2",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 3",plain)
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A flow mapping:
+//
+//      {
+//          a simple key: a value,  # Note that the KEY token is produced.
+//          ? a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+//      - item 1
+//      - item 2
+//      -
+//        - item 3.1
+//        - item 3.2
+//      -
+//        key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 3.1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 3.2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Block mappings:
+//
+//      a simple key: a value   # The KEY token is produced here.
+//      ? a complex key
+//      : another value
+//      a mapping:
+//        key 1: value 1
+//        key 2: value 2
+//      a sequence:
+//        - item 1
+//        - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML does not always require starting a new block collection on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+//      - - item 1
+//        - item 2
+//      - key 1: value 1
+//        key 2: value 2
+//      - ? complex key
+//        : complex value
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("complex key")
+//      VALUE
+//      SCALAR("complex value")
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Collections in a mapping:
+//
+//      ? a sequence
+//      : - item 1
+//        - item 2
+//      ? a mapping
+//      : key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
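+// [Editor's note] Scanner errors carry two positions: a context mark (where
+// the enclosing construct began) and a problem mark (where scanning stopped).
+// Messages such as "while scanning a simple key ... could not find expected
+// ':'" below are assembled from these fields.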
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
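+		// A required key can only occur in block context, at the exact
+		// indentation column (e.g. the "a" in "a: 1"); if it must be
+		// discarded before its ':' was seen, the document cannot be valid.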
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level
+// if the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
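+		// Note that the KEY token is inserted retroactively: token_number
+		// was recorded when the key candidate was saved, so the token lands
+		// ahead of the scalar that is already sitting in the queue.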
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
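+	// The 'single' flag selects the quoting rules: '' doubling inside
+	// single quotes versus backslash escapes inside double quotes.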
+ var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
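+		// For "%TAG !yaml! tag:yaml.org,2002:" the handle is "!yaml!" and
+		// the prefix is "tag:yaml.org,2002:".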
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
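+	// For a handle such as "!foo!" this loop gathers the "foo" between
+	// the two '!' markers.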
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
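+		// An escaped octet is '%' followed by two hex digits; a multi-byte
+		// UTF-8 character arrives as consecutive escapes, e.g. "%C3%A9".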
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
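+	// The full header has been consumed by now, e.g. "|+2" for a literal
+	// scalar with keep-chomping and an indentation increment of 2; only a
+	// comment or a line break may follow it.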
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
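+		// While the indentation is still undetermined (*indent == 0) all
+		// leading spaces are eaten, and max_indent records the deepest
+		// column reached so it can become the scalar's indentation below.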
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing up the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
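+				// The first break is kept separate from the following ones
+				// so the folding logic below can choose between a joining
+				// space and literal line breaks.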
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses the indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
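+	// A plain scalar that ended at a line break leaves the scanner at the
+	// start of a fresh line, where a new simple key may legitimately begin.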
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/sorter.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 000000000..5958822f9 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/suite_test.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/suite_test.go new file mode 100644 index 000000000..c5cf1ed4f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/suite_test.go @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . 
"gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/writerc.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 000000000..190362f25 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/yaml.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 000000000..d133edf9d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. 
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Does not apply to zero valued structs.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int "a,omitempty"
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshal("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/yamlh.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 000000000..d60a6b6b0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_parser_set_input().
+// [out]      buffer      The buffer to write the data from the source.
+// [in]       size        The size of the buffer.
+// [out]      size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/services/templeton/vendor/src/gopkg.in/yaml.v2/yamlprivateh.go b/services/templeton/vendor/src/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 000000000..8110ce3c3
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} From 86110aaf3f11c4ee0224fee79707229559d5ccfa Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 15 Feb 2016 15:59:01 -0700 Subject: [PATCH 084/183] Add minor documentation to the UI's 'getSimpleQuery', as to produced JSON value. --- .../iplantc/de/client/services/impl/DataSearchQueryBuilder.java | 1 + 1 file changed, 1 insertion(+) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilder.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilder.java index b79dba54f..fea8fda91 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilder.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/DataSearchQueryBuilder.java @@ -436,6 +436,7 @@ private void appendArrayItem(Splittable array, Splittable item) { } public Splittable getSimpleQuery(String field, String userEntry) { + // {"query": {"query_string": {"query": "*la* OR *foo*", "fields":["whatever"]}}} Splittable query = StringQuoter.createSplittable(); Splittable simpleQuery = addChild(query, QUERY_STRING); String entry = applyImplicitAsteriskSearchText(applyOROperator(userEntry)); From cd9d01ff8c2adc02d8916ef361e6d5b344be91f0 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 16 Feb 2016 11:15:35 -0700 Subject: [PATCH 085/183] Add stubs for periodic and incremental modes. 
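
For reference, the three modes are expected to dispatch roughly like this
once the AMQP wiring lands (an illustrative sketch only; the two TODO
branches correspond to the stubs added here and are not working code yet):

    switch *mode {
    case "full":
            // One-shot: index the whole metadata database, then exit.
            es.IndexEverything(d)
    case "periodic":
            // TODO: block on an AMQP consumer and rerun the full-mode
            // steps whenever a reindex message arrives.
    case "incremental":
            // TODO: block on an AMQP consumer and index only the
            // objects referenced by each message.
    }
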
--- services/templeton/src/templeton/main.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/services/templeton/src/templeton/main.go b/services/templeton/src/templeton/main.go index 590167ece..e1b2adce8 100644 --- a/services/templeton/src/templeton/main.go +++ b/services/templeton/src/templeton/main.go @@ -127,9 +127,24 @@ func main() { if *mode == "full" { logger.Println("Full indexing mode selected.") + // TODO: purge deleted items es.IndexEverything(d) return } loadAMQPConfig() + + if *mode == "periodic" { + logger.Println("Periodic indexing mode selected.") + + // TODO: AMQP listener triggering same steps as full mode + return + } + + if *mode == "incremental" { + logger.Println("Incremental indexing mode selected.") + + // TODO: AMQP listener triggering incremental updates + return + } } From 10a9d6e412b271056dce98210533c55c8b52635e Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 16 Feb 2016 15:30:52 -0700 Subject: [PATCH 086/183] Stub out purge functionality. --- .../src/templeton/elasticsearch/elasticsearch.go | 10 ++++++++-- services/templeton/src/templeton/main.go | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go index 757144633..5a80edcce 100644 --- a/services/templeton/src/templeton/elasticsearch/elasticsearch.go +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -43,8 +43,15 @@ func (e *Elasticer) Close() { e.es.Close() } +// PurgeIndex walks an index querying a database, deleting those which should not exist +func (e *Elasticer) PurgeIndex(d *database.Databaser) { + indexer := e.es.NewBulkIndexerErrors(10, 60) + indexer.Start() + defer indexer.Stop() +} + // IndexEverything creates a bulk indexer and takes a database, and iterates to index its contents -func (e *Elasticer) IndexEverything(d *database.Databaser) error { +func (e *Elasticer) IndexEverything(d *database.Databaser) { indexer := e.es.NewBulkIndexerErrors(10, 60) indexer.Start() defer indexer.Stop() @@ -82,5 +89,4 @@ func (e *Elasticer) IndexEverything(d *database.Databaser) error { indexer.Index(e.index, "metadata", formatted.ID, "", "", nil, js) } - return nil } diff --git a/services/templeton/src/templeton/main.go b/services/templeton/src/templeton/main.go index e1b2adce8..308361b3c 100644 --- a/services/templeton/src/templeton/main.go +++ b/services/templeton/src/templeton/main.go @@ -127,7 +127,7 @@ func main() { if *mode == "full" { logger.Println("Full indexing mode selected.") - // TODO: purge deleted items + go es.PurgeIndex(d) es.IndexEverything(d) return } From ed5c8db6083d816990b523d07c0e51f02a140fb7 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 16 Feb 2016 16:09:03 -0700 Subject: [PATCH 087/183] Refine elasticsearch.GetAllObjects to return a cursor rather than closures. 
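
With the cursor in place, callers reduce to a plain fetch loop. The
IndexEverything change in this patch takes the shape sketched here (EOS is
the sentinel error added to the database package; indexing details elided):

    cursor, err := d.GetAllObjects()
    if err != nil {
            logger.Fatal(err)
    }
    defer cursor.Close()

    for {
            avus, err := cursor.Next()
            if err == database.EOS {
                    break // all rows have been consumed
            }
            if err != nil {
                    logger.Print(err)
                    break
            }
            // ... format and index the AVUs for this one object ...
    }
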
--- .../src/templeton/database/database.go | 99 ++++++++++++------- .../templeton/elasticsearch/elasticsearch.go | 15 ++- 2 files changed, 72 insertions(+), 42 deletions(-) diff --git a/services/templeton/src/templeton/database/database.go b/services/templeton/src/templeton/database/database.go index 9c34c6834..4ce2378fb 100644 --- a/services/templeton/src/templeton/database/database.go +++ b/services/templeton/src/templeton/database/database.go @@ -2,13 +2,21 @@ package database import ( "database/sql" + "errors" "fmt" + "logcabin" + "templeton/model" _ "github.com/lib/pq" ) +var ( + EOS = errors.New("EOS") + logger = logcabin.New() +) + // Databaser is a type used to interact with the database. type Databaser struct { db *sql.DB @@ -124,46 +132,69 @@ func (d *Databaser) GetObjectAVUs(uuid string) ([]model.AVURecord, error) { return retval, err } +type objectCursor struct { + rows *sql.Rows + lastRow *model.AVURecord + moreRows bool + anyRows bool +} + +func newObjectCursor(rows *sql.Rows) *objectCursor { + return &objectCursor{rows: rows, lastRow: &model.AVURecord{TargetId: ""}, moreRows: true, anyRows: false} +} + +func (o *objectCursor) Next() ([]model.AVURecord, error) { + if !o.moreRows { + return nil, EOS + } + + var retval []model.AVURecord + + if o.lastRow.TargetId != "" { + retval = append(retval, *o.lastRow) + } + + for o.moreRows { + o.moreRows = o.rows.Next() + if !o.moreRows { + break + } + o.anyRows = true + + ar, err := avuRecordFromRow(o.rows) + if err != nil { + return nil, err + } + + if o.lastRow.TargetId == "" || o.lastRow.TargetId == ar.TargetId { + o.lastRow = ar + retval = append(retval, *ar) + } else { + o.lastRow = ar + break + } + } + err := o.rows.Err() + if err == nil && !o.anyRows { + logger.Print("No metadata was found in the configured database.") + return nil, EOS + } + return retval, err +} + +func (o *objectCursor) Close() { + o.rows.Close() +} + // GetAllObjects returns a function to iterate through individual objects' worth of AVURecords, and a function to clean up // The function it returns will return nil if all records have been read. 
-func (d *Databaser) GetAllObjects() (func() ([]model.AVURecord, error), func(), error) { +func (d *Databaser) GetAllObjects() (*objectCursor, error) { query := selectAVUsWhere("") rows, err := d.db.Query(query) - endFunc := func() { rows.Close() } if err != nil { - return nil, endFunc, err + return nil, err } - lastRow := &model.AVURecord{TargetId: ""} - moreRows := true - - return func() ([]model.AVURecord, error) { - if !moreRows { - return nil, nil - } - var retval []model.AVURecord - if lastRow.TargetId != "" { - retval = append(retval, *lastRow) - } - for moreRows { - moreRows = rows.Next() - if !moreRows { - break - } - ar, err := avuRecordFromRow(rows) - if err != nil { - return nil, err - } - if lastRow.TargetId == "" || lastRow.TargetId == ar.TargetId { - lastRow = ar - retval = append(retval, *ar) - } else { - lastRow = ar - break - } - } - err = rows.Err() - return retval, err - }, endFunc, err + return newObjectCursor(rows), nil } diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go index 5a80edcce..c73794951 100644 --- a/services/templeton/src/templeton/elasticsearch/elasticsearch.go +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -56,21 +56,20 @@ func (e *Elasticer) IndexEverything(d *database.Databaser) { indexer.Start() defer indexer.Stop() - nextObjFunc, endFunc, err := d.GetAllObjects() - defer endFunc() - + cursor, err := d.GetAllObjects() if err != nil { logger.Fatal(err) } + defer cursor.Close() for { - ids, err := nextObjFunc() - if err != nil { - logger.Print(err) + ids, err := cursor.Next() + if err == database.EOS { + logger.Print("Done all rows, finishing.") break } - if ids == nil { - logger.Print("Done all rows, finishing.") + if err != nil { + logger.Print(err) break } From d083f08eceb282a1b6413820473c69a301037861 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 16 Feb 2016 16:51:23 -0700 Subject: [PATCH 088/183] Switch to olivere/elastic, whose code seems to be more stable. Vendored deps in second commit. 
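
For reviewers: elastigo's channel-based bulk indexer is replaced by a small
wrapper around elastic.BulkService. Indexing code then follows the pattern
sketched below (assembled from the diff that follows; error handling
abbreviated):

    indexer := e.NewBulkIndexer(10) // flushes once 10 actions are queued
    defer indexer.Flush()

    req := elastic.NewBulkIndexRequest().
            Index(e.index).
            Type("metadata").
            Id(formatted.ID).
            Doc(formatted)
    if err := indexer.Add(req); err != nil {
            logger.Print(err)
    }
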
--- .../templeton/elasticsearch/elasticsearch.go | 62 +++++++++++++------ 1 file changed, 42 insertions(+), 20 deletions(-) diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go index c73794951..511ddeeb6 100644 --- a/services/templeton/src/templeton/elasticsearch/elasticsearch.go +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -3,8 +3,7 @@ package elasticsearch import ( "logcabin" - "encoding/json" - "github.com/mattbaird/elastigo/lib" + "github.com/olivere/elastic" "templeton/database" "templeton/model" @@ -16,7 +15,7 @@ var ( // Elasticer is a type used to interact with Elasticsearch type Elasticer struct { - es *elastigo.Conn + es *elastic.Client baseURL string index string } @@ -24,14 +23,8 @@ type Elasticer struct { // NewElasticer returns a pointer to an Elasticer instance that has already tested its connection // by making a WaitForStatus call to the configured Elasticsearch cluster func NewElasticer(elasticsearchBase string, elasticsearchIndex string) (*Elasticer, error) { - c := elastigo.NewConn() + c, err := elastic.NewClient(elastic.SetURL(elasticsearchBase)) - err := c.SetFromUrl(elasticsearchBase) - if err != nil { - return nil, err - } - - _, err = c.WaitForStatus("red", 10, elasticsearchIndex) if err != nil { return nil, err } @@ -40,21 +33,51 @@ func NewElasticer(elasticsearchBase string, elasticsearchIndex string) (*Elastic } func (e *Elasticer) Close() { - e.es.Close() + e.es.Stop() +} + +type BulkIndexer struct { + es *elastic.Client + bulkSize int + bulkService *elastic.BulkService +} + +func (e *Elasticer) NewBulkIndexer(bulkSize int) *BulkIndexer { + return &BulkIndexer{bulkSize: bulkSize, es: e.es, bulkService: e.es.Bulk()} +} + +func (b *BulkIndexer) Add(r elastic.BulkableRequest) error { + b.bulkService.Add(r) + if b.bulkService.NumberOfActions() >= b.bulkSize { + err := b.Flush() + if err != nil { + return err + } + } + return nil +} + +func (b *BulkIndexer) Flush() error { + _, err := b.bulkService.Do() + if err != nil { + return err + } + + b.bulkService = b.es.Bulk() + + return nil } // PurgeIndex walks an index querying a database, deleting those which should not exist func (e *Elasticer) PurgeIndex(d *database.Databaser) { - indexer := e.es.NewBulkIndexerErrors(10, 60) - indexer.Start() - defer indexer.Stop() + indexer := e.NewBulkIndexer(10) + defer indexer.Flush() } // IndexEverything creates a bulk indexer and takes a database, and iterates to index its contents func (e *Elasticer) IndexEverything(d *database.Databaser) { - indexer := e.es.NewBulkIndexerErrors(10, 60) - indexer.Start() - defer indexer.Stop() + indexer := e.NewBulkIndexer(10) + defer indexer.Flush() cursor, err := d.GetAllObjects() if err != nil { @@ -80,12 +103,11 @@ func (e *Elasticer) IndexEverything(d *database.Databaser) { } logger.Printf("Indexing %s", formatted.ID) - js, err := json.Marshal(formatted) + req := elastic.NewBulkIndexRequest().Index(e.index).Type("metadata").Id(formatted.ID).Doc(formatted) + err = indexer.Add(req) if err != nil { logger.Print(err) break } - - indexer.Index(e.index, "metadata", formatted.ID, "", "", nil, js) } } From fb975098bd8d8b08e732015fad5cd3d86618dcb0 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 16 Feb 2016 16:52:00 -0700 Subject: [PATCH 089/183] Switch to olivere/elastic (vendored deps update) --- services/templeton/vendor/manifest | 19 +- .../mattbaird/elastigo/lib/baserequest.go | 141 - 
.../mattbaird/elastigo/lib/baseresponse.go | 146 - .../mattbaird/elastigo/lib/catindexinfo.go | 80 - .../elastigo/lib/catindexinfo_test.go | 117 - .../mattbaird/elastigo/lib/catnodeinfo.go | 249 -- .../elastigo/lib/catnodeinfo_test.go | 58 - .../mattbaird/elastigo/lib/catresponses.go | 105 - .../mattbaird/elastigo/lib/catshardinfo.go | 106 - .../elastigo/lib/catshardinfo_test.go | 85 - .../mattbaird/elastigo/lib/clusterhealth.go | 128 - .../elastigo/lib/clusterhealthresponses.go | 45 - .../elastigo/lib/clusternodeshotthreads.go | 12 - .../elastigo/lib/clusternodesinfo.go | 184 - .../elastigo/lib/clusternodesinfo_test.go | 37 - .../elastigo/lib/clusternodesshutdown.go | 37 - .../elastigo/lib/clusternodesstats.go | 31 - .../mattbaird/elastigo/lib/clusterreroute.go | 81 - .../mattbaird/elastigo/lib/clusterstate.go | 38 - .../elastigo/lib/clusterstatresponses.go | 299 -- .../elastigo/lib/clusterupdatesettings.go | 46 - .../mattbaird/elastigo/lib/connection.go | 184 - .../mattbaird/elastigo/lib/connection_test.go | 62 - .../mattbaird/elastigo/lib/corebulk.go | 414 --- .../mattbaird/elastigo/lib/corebulk_test.go | 399 --- .../mattbaird/elastigo/lib/corebulkudp.go | 12 - .../mattbaird/elastigo/lib/corecount.go | 45 - .../mattbaird/elastigo/lib/coredelete.go | 37 - .../elastigo/lib/coredeletebyquery.go | 57 - .../elastigo/lib/coreexample_test.go | 52 - .../mattbaird/elastigo/lib/coreexplain.go | 43 - .../mattbaird/elastigo/lib/coreget.go | 129 - .../mattbaird/elastigo/lib/coreindex.go | 132 - .../mattbaird/elastigo/lib/coremget.go | 62 - .../elastigo/lib/coremorelikethis.go | 57 - .../mattbaird/elastigo/lib/coremsearch.go | 12 - .../mattbaird/elastigo/lib/corepercolate.go | 64 - .../elastigo/lib/corepercolate_test.go | 64 - .../mattbaird/elastigo/lib/coresearch.go | 246 -- .../mattbaird/elastigo/lib/coresearch_test.go | 83 - .../mattbaird/elastigo/lib/coretest_test.go | 198 -- .../mattbaird/elastigo/lib/coreupdate.go | 94 - .../mattbaird/elastigo/lib/corevalidate.go | 53 - .../mattbaird/elastigo/lib/error.go | 8 - .../mattbaird/elastigo/lib/indicesaliases.go | 65 - .../mattbaird/elastigo/lib/indicesanalyze.go | 55 - .../elastigo/lib/indicesclearcache.go | 44 - .../elastigo/lib/indicescreateindex.go | 77 - .../elastigo/lib/indicesdeleteindex.go | 42 - .../elastigo/lib/indicesdeletemapping.go | 45 - .../elastigo/lib/indicesdeletemapping_test.go | 54 - .../mattbaird/elastigo/lib/indicesdoc.go | 12 - .../mattbaird/elastigo/lib/indicesflush.go | 46 - .../elastigo/lib/indicesgetsettings.go | 12 - .../elastigo/lib/indicesindicesexists.go | 37 - .../elastigo/lib/indicesopencloseindex.go | 54 - .../mattbaird/elastigo/lib/indicesoptimize.go | 41 - .../elastigo/lib/indicesputmapping.go | 171 - .../elastigo/lib/indicesputmapping_test.go | 302 -- .../elastigo/lib/indicesputsettings.go | 42 - .../mattbaird/elastigo/lib/indicesrefresh.go | 45 - .../mattbaird/elastigo/lib/indicessegments.go | 12 - .../mattbaird/elastigo/lib/indicessnapshot.go | 44 - .../mattbaird/elastigo/lib/indicesstats.go | 12 - .../mattbaird/elastigo/lib/indicesstatus.go | 43 - .../elastigo/lib/indicestemplates.go | 12 - .../elastigo/lib/indicesupdatesettings.go | 12 - .../mattbaird/elastigo/lib/request.go | 126 - .../mattbaird/elastigo/lib/request_test.go | 74 - .../mattbaird/elastigo/lib/searchaggregate.go | 226 -- .../elastigo/lib/searchaggregate_test.go | 177 - .../mattbaird/elastigo/lib/searchdsl.go | 28 - .../mattbaird/elastigo/lib/searchfacet.go | 142 - .../elastigo/lib/searchfacet_test.go | 42 - 
.../mattbaird/elastigo/lib/searchfilter.go | 402 --- .../elastigo/lib/searchfilter_test.go | 287 -- .../mattbaird/elastigo/lib/searchhighlight.go | 138 - .../elastigo/lib/searchhighlight_test.go | 67 - .../mattbaird/elastigo/lib/searchquery.go | 262 -- .../mattbaird/elastigo/lib/searchreadme | 4 - .../mattbaird/elastigo/lib/searchsearch.go | 204 -- .../elastigo/lib/searchsearch_test.go | 291 -- .../mattbaird/elastigo/lib/searchsort.go | 52 - .../mattbaird/elastigo/lib/setup_test.go | 84 - .../mattbaird/elastigo/lib/shared.go | 18 - .../mattbaird/elastigo/lib/shared_test.go | 43 - .../mattbaird/elastigo/lib/snapshot.go | 120 - .../olivere/elastic/CHANGELOG-3.0.md | 363 ++ .../olivere/elastic/CONTRIBUTING.md | 40 + .../github.com/olivere/elastic/CONTRIBUTORS | 35 + .../src/github.com/olivere/elastic/LICENSE | 20 + .../src/github.com/olivere/elastic/README.md | 415 +++ .../olivere/elastic/backoff/LICENSE | 22 + .../olivere/elastic/backoff/backoff.go | 159 + .../olivere/elastic/backoff/backoff_test.go | 146 + .../olivere/elastic/backoff/retry.go | 53 + .../olivere/elastic/backoff/retry_test.go | 44 + .../src/github.com/olivere/elastic/bulk.go | 314 ++ .../olivere/elastic/bulk_delete_request.go | 112 + .../elastic/bulk_delete_request_test.go | 42 + .../olivere/elastic/bulk_index_request.go | 173 + .../elastic/bulk_index_request_test.go | 63 + .../olivere/elastic/bulk_processor.go | 515 +++ .../olivere/elastic/bulk_processor_test.go | 406 +++ .../olivere/elastic/bulk_request.go | 17 + .../github.com/olivere/elastic/bulk_test.go | 463 +++ .../olivere/elastic/bulk_update_request.go | 219 ++ .../elastic/bulk_update_request_test.go | 77 + .../olivere/elastic/canonicalize.go | 28 + .../olivere/elastic/canonicalize_test.go | 41 + .../olivere/elastic/clear_scroll.go | 102 + .../olivere/elastic/clear_scroll_test.go | 85 + .../src/github.com/olivere/elastic/client.go | 1551 +++++++++ .../github.com/olivere/elastic/client_test.go | 899 +++++ .../olivere/elastic/cluster-test/Makefile | 16 + .../olivere/elastic/cluster-test/README.md | 63 + .../elastic/cluster-test/cluster-test.go | 356 ++ .../olivere/elastic/cluster_health.go | 244 ++ .../olivere/elastic/cluster_health_test.go | 109 + .../olivere/elastic/cluster_state.go | 284 ++ .../olivere/elastic/cluster_state_test.go | 92 + .../olivere/elastic/cluster_stats.go | 349 ++ .../olivere/elastic/cluster_stats_test.go | 85 + .../olivere/elastic/config/elasticsearch.yml | 103 + .../github.com/olivere/elastic/connection.go | 90 + .../src/github.com/olivere/elastic/count.go | 310 ++ .../github.com/olivere/elastic/count_test.go | 124 + .../src/github.com/olivere/elastic/decoder.go | 26 + .../olivere/elastic/decoder_test.go | 49 + .../src/github.com/olivere/elastic/delete.go | 214 ++ .../olivere/elastic/delete_by_query.go | 302 ++ .../olivere/elastic/delete_by_query_test.go | 114 + .../olivere/elastic/delete_template.go | 118 + .../olivere/elastic/delete_template_test.go | 22 + .../github.com/olivere/elastic/delete_test.go | 118 + .../src/github.com/olivere/elastic/doc.go | 51 + .../src/github.com/olivere/elastic/errors.go | 141 + .../github.com/olivere/elastic/errors_test.go | 202 ++ .../olivere/elastic/example_test.go | 547 +++ .../src/github.com/olivere/elastic/exists.go | 175 + .../github.com/olivere/elastic/exists_test.go | 52 + .../src/github.com/olivere/elastic/explain.go | 330 ++ .../olivere/elastic/explain_test.go | 41 + .../olivere/elastic/fetch_source_context.go | 74 + .../elastic/fetch_source_context_test.go | 125 + 
.../github.com/olivere/elastic/geo_point.go | 48 + .../olivere/elastic/geo_point_test.go | 24 + .../src/github.com/olivere/elastic/get.go | 271 ++ .../olivere/elastic/get_template.go | 113 + .../olivere/elastic/get_template_test.go | 51 + .../github.com/olivere/elastic/get_test.go | 165 + .../github.com/olivere/elastic/highlight.go | 455 +++ .../olivere/elastic/highlight_test.go | 192 ++ .../src/github.com/olivere/elastic/index.go | 284 ++ .../github.com/olivere/elastic/index_test.go | 279 ++ .../olivere/elastic/indices_close.go | 153 + .../olivere/elastic/indices_close_test.go | 81 + .../olivere/elastic/indices_create.go | 129 + .../olivere/elastic/indices_create_test.go | 60 + .../olivere/elastic/indices_delete.go | 129 + .../elastic/indices_delete_template.go | 122 + .../olivere/elastic/indices_delete_test.go | 20 + .../olivere/elastic/indices_delete_warmer.go | 131 + .../elastic/indices_delete_warmer_test.go | 48 + .../olivere/elastic/indices_exists.go | 149 + .../elastic/indices_exists_template.go | 112 + .../elastic/indices_exists_template_test.go | 68 + .../olivere/elastic/indices_exists_test.go | 20 + .../olivere/elastic/indices_exists_type.go | 161 + .../elastic/indices_exists_type_test.go | 134 + .../olivere/elastic/indices_flush.go | 169 + .../olivere/elastic/indices_flush_test.go | 69 + .../olivere/elastic/indices_forcemerge.go | 200 ++ .../elastic/indices_forcemerge_test.go | 56 + .../github.com/olivere/elastic/indices_get.go | 202 ++ .../olivere/elastic/indices_get_aliases.go | 155 + .../elastic/indices_get_aliases_test.go | 146 + .../olivere/elastic/indices_get_mapping.go | 170 + .../elastic/indices_get_mapping_test.go | 50 + .../olivere/elastic/indices_get_settings.go | 183 + .../elastic/indices_get_settings_test.go | 81 + .../olivere/elastic/indices_get_template.go | 128 + .../elastic/indices_get_template_test.go | 41 + .../olivere/elastic/indices_get_test.go | 97 + .../olivere/elastic/indices_get_warmer.go | 194 ++ .../elastic/indices_get_warmer_test.go | 83 + .../olivere/elastic/indices_open.go | 157 + .../olivere/elastic/indices_open_test.go | 20 + .../olivere/elastic/indices_put_alias.go | 111 + .../olivere/elastic/indices_put_alias_test.go | 123 + .../olivere/elastic/indices_put_mapping.go | 221 ++ .../elastic/indices_put_mapping_test.go | 82 + .../olivere/elastic/indices_put_settings.go | 184 + .../elastic/indices_put_settings_test.go | 92 + .../olivere/elastic/indices_put_template.go | 179 + .../olivere/elastic/indices_put_warmer.go | 222 ++ .../elastic/indices_put_warmer_test.go | 100 + .../olivere/elastic/indices_refresh.go | 94 + .../olivere/elastic/indices_refresh_test.go | 47 + .../olivere/elastic/indices_stats.go | 385 +++ .../olivere/elastic/indices_stats_test.go | 85 + .../github.com/olivere/elastic/inner_hit.go | 160 + .../olivere/elastic/inner_hit_test.go | 44 + .../src/github.com/olivere/elastic/logger.go | 10 + .../src/github.com/olivere/elastic/mget.go | 219 ++ .../github.com/olivere/elastic/mget_test.go | 95 + .../src/github.com/olivere/elastic/msearch.go | 96 + .../olivere/elastic/msearch_test.go | 197 ++ .../github.com/olivere/elastic/nodes_info.go | 318 ++ .../olivere/elastic/nodes_info_test.go | 40 + .../github.com/olivere/elastic/optimize.go | 130 + .../olivere/elastic/optimize_test.go | 47 + .../github.com/olivere/elastic/percolate.go | 309 ++ .../olivere/elastic/percolate_test.go | 92 + .../src/github.com/olivere/elastic/ping.go | 126 + .../github.com/olivere/elastic/ping_test.go | 64 + .../src/github.com/olivere/elastic/plugins.go | 38 + 
.../olivere/elastic/plugins_test.go | 32 + .../src/github.com/olivere/elastic/query.go | 13 + .../github.com/olivere/elastic/reindexer.go | 270 ++ .../olivere/elastic/reindexer_test.go | 285 ++ .../src/github.com/olivere/elastic/request.go | 123 + .../src/github.com/olivere/elastic/rescore.go | 44 + .../github.com/olivere/elastic/rescorer.go | 64 + .../github.com/olivere/elastic/response.go | 43 + .../src/github.com/olivere/elastic/scan.go | 359 ++ .../github.com/olivere/elastic/scan_test.go | 559 +++ .../src/github.com/olivere/elastic/script.go | 131 + .../github.com/olivere/elastic/script_test.go | 78 + .../src/github.com/olivere/elastic/scroll.go | 208 ++ .../github.com/olivere/elastic/scroll_test.go | 106 + .../src/github.com/olivere/elastic/search.go | 429 +++ .../github.com/olivere/elastic/search_aggs.go | 1270 +++++++ .../elastic/search_aggs_bucket_children.go | 76 + .../search_aggs_bucket_children_test.go | 46 + .../search_aggs_bucket_date_histogram.go | 285 ++ .../search_aggs_bucket_date_histogram_test.go | 49 + .../elastic/search_aggs_bucket_date_range.go | 234 ++ .../search_aggs_bucket_date_range_test.go | 130 + .../elastic/search_aggs_bucket_filter.go | 77 + .../elastic/search_aggs_bucket_filter_test.go | 66 + .../elastic/search_aggs_bucket_filters.go | 96 + .../search_aggs_bucket_filters_test.go | 68 + .../search_aggs_bucket_geo_distance.go | 194 ++ .../search_aggs_bucket_geo_distance_test.go | 71 + .../elastic/search_aggs_bucket_global.go | 71 + .../elastic/search_aggs_bucket_global_test.go | 44 + .../elastic/search_aggs_bucket_histogram.go | 253 ++ .../search_aggs_bucket_histogram_test.go | 61 + .../elastic/search_aggs_bucket_missing.go | 81 + .../search_aggs_bucket_missing_test.go | 44 + .../elastic/search_aggs_bucket_nested.go | 82 + .../elastic/search_aggs_bucket_nested_test.go | 62 + .../elastic/search_aggs_bucket_range.go | 232 ++ .../elastic/search_aggs_bucket_range_test.go | 156 + .../elastic/search_aggs_bucket_sampler.go | 145 + .../search_aggs_bucket_sampler_test.go | 52 + .../search_aggs_bucket_significant_terms.go | 141 + ...arch_aggs_bucket_significant_terms_test.go | 86 + .../elastic/search_aggs_bucket_terms.go | 341 ++ .../elastic/search_aggs_bucket_terms_test.go | 104 + .../elastic/search_aggs_metrics_avg.go | 101 + .../elastic/search_aggs_metrics_avg_test.go | 61 + .../search_aggs_metrics_cardinality.go | 120 + .../search_aggs_metrics_cardinality_test.go | 78 + .../search_aggs_metrics_extended_stats.go | 99 + ...search_aggs_metrics_extended_stats_test.go | 44 + .../elastic/search_aggs_metrics_geo_bounds.go | 105 + .../search_aggs_metrics_geo_bounds_test.go | 61 + .../elastic/search_aggs_metrics_max.go | 99 + .../elastic/search_aggs_metrics_max_test.go | 61 + .../elastic/search_aggs_metrics_min.go | 100 + .../elastic/search_aggs_metrics_min_test.go | 61 + .../search_aggs_metrics_percentile_ranks.go | 131 + ...arch_aggs_metrics_percentile_ranks_test.go | 78 + .../search_aggs_metrics_percentiles.go | 130 + .../search_aggs_metrics_percentiles_test.go | 78 + .../elastic/search_aggs_metrics_stats.go | 99 + .../elastic/search_aggs_metrics_stats_test.go | 61 + .../elastic/search_aggs_metrics_sum.go | 99 + .../elastic/search_aggs_metrics_sum_test.go | 61 + .../elastic/search_aggs_metrics_top_hits.go | 143 + .../search_aggs_metrics_top_hits_test.go | 31 + .../search_aggs_metrics_value_count.go | 102 + .../search_aggs_metrics_value_count_test.go | 63 + .../search_aggs_pipeline_avg_bucket.go | 113 + .../search_aggs_pipeline_avg_bucket_test.go | 27 + 
.../search_aggs_pipeline_bucket_script.go | 132 + ...search_aggs_pipeline_bucket_script_test.go | 30 + .../search_aggs_pipeline_bucket_selector.go | 134 + ...arch_aggs_pipeline_bucket_selector_test.go | 29 + .../search_aggs_pipeline_cumulative_sum.go | 90 + ...earch_aggs_pipeline_cumulative_sum_test.go | 27 + .../search_aggs_pipeline_derivative.go | 124 + .../search_aggs_pipeline_derivative_test.go | 27 + .../search_aggs_pipeline_max_bucket.go | 114 + .../search_aggs_pipeline_max_bucket_test.go | 27 + .../search_aggs_pipeline_min_bucket.go | 114 + .../search_aggs_pipeline_min_bucket_test.go | 27 + .../elastic/search_aggs_pipeline_mov_avg.go | 393 +++ .../search_aggs_pipeline_mov_avg_test.go | 132 + .../search_aggs_pipeline_serial_diff.go | 124 + .../search_aggs_pipeline_serial_diff_test.go | 27 + .../search_aggs_pipeline_sum_bucket.go | 113 + .../search_aggs_pipeline_sum_bucket_test.go | 27 + .../elastic/search_aggs_pipeline_test.go | 1000 ++++++ .../olivere/elastic/search_aggs_test.go | 2996 +++++++++++++++++ .../olivere/elastic/search_queries_bool.go | 212 ++ .../elastic/search_queries_bool_test.go | 34 + .../elastic/search_queries_boosting.go | 97 + .../elastic/search_queries_boosting_test.go | 30 + .../elastic/search_queries_common_terms.go | 146 + .../search_queries_common_terms_test.go | 84 + .../elastic/search_queries_constant_score.go | 59 + .../search_queries_constant_score_test.go | 27 + .../olivere/elastic/search_queries_dis_max.go | 104 + .../elastic/search_queries_dis_max_test.go | 28 + .../olivere/elastic/search_queries_exists.go | 49 + .../elastic/search_queries_exists_test.go | 27 + .../olivere/elastic/search_queries_fsq.go | 172 + .../elastic/search_queries_fsq_score_funcs.go | 567 ++++ .../elastic/search_queries_fsq_test.go | 166 + .../olivere/elastic/search_queries_fuzzy.go | 120 + .../elastic/search_queries_fuzzy_test.go | 27 + .../search_queries_geo_bounding_box.go | 121 + .../search_queries_geo_bounding_box_test.go | 63 + .../elastic/search_queries_geo_distance.go | 116 + .../search_queries_geo_distance_test.go | 70 + .../elastic/search_queries_geo_polygon.go | 72 + .../search_queries_geo_polygon_test.go | 58 + .../elastic/search_queries_has_child.go | 129 + .../elastic/search_queries_has_child_test.go | 45 + .../elastic/search_queries_has_parent.go | 97 + .../elastic/search_queries_has_parent_test.go | 27 + .../olivere/elastic/search_queries_ids.go | 76 + .../elastic/search_queries_ids_test.go | 27 + .../olivere/elastic/search_queries_indices.go | 89 + .../elastic/search_queries_indices_test.go | 46 + .../olivere/elastic/search_queries_match.go | 214 ++ .../elastic/search_queries_match_all.go | 41 + .../elastic/search_queries_match_all_test.go | 44 + .../elastic/search_queries_match_test.go | 78 + .../olivere/elastic/search_queries_missing.go | 67 + .../elastic/search_queries_missing_test.go | 44 + .../elastic/search_queries_more_like_this.go | 412 +++ .../search_queries_more_like_this_test.go | 91 + .../elastic/search_queries_multi_match.go | 275 ++ .../search_queries_multi_match_test.go | 131 + .../olivere/elastic/search_queries_nested.go | 85 + .../elastic/search_queries_nested_test.go | 52 + .../olivere/elastic/search_queries_not.go | 45 + .../elastic/search_queries_not_test.go | 46 + .../olivere/elastic/search_queries_prefix.go | 67 + .../elastic/search_queries_prefix_test.go | 45 + .../elastic/search_queries_query_string.go | 349 ++ .../search_queries_query_string_test.go | 28 + .../olivere/elastic/search_queries_range.go | 145 + 
.../elastic/search_queries_range_test.go | 68 + .../olivere/elastic/search_queries_regexp.go | 82 + .../elastic/search_queries_regexp_test.go | 47 + .../olivere/elastic/search_queries_script.go | 51 + .../elastic/search_queries_script_test.go | 45 + .../search_queries_simple_query_string.go | 185 + ...search_queries_simple_query_string_test.go | 86 + .../elastic/search_queries_template_query.go | 84 + .../search_queries_template_query_test.go | 65 + .../olivere/elastic/search_queries_term.go | 58 + .../elastic/search_queries_term_test.go | 46 + .../olivere/elastic/search_queries_terms.go | 58 + .../elastic/search_queries_terms_test.go | 46 + .../olivere/elastic/search_queries_type.go | 26 + .../elastic/search_queries_type_test.go | 27 + .../elastic/search_queries_wildcard.go | 81 + .../elastic/search_queries_wildcard_test.go | 67 + .../olivere/elastic/search_request.go | 153 + .../olivere/elastic/search_request_test.go | 48 + .../olivere/elastic/search_source.go | 511 +++ .../olivere/elastic/search_source_test.go | 238 ++ .../olivere/elastic/search_suggester_test.go | 259 ++ .../olivere/elastic/search_template.go | 152 + .../olivere/elastic/search_templates_test.go | 98 + .../github.com/olivere/elastic/search_test.go | 885 +++++ .../github.com/olivere/elastic/setup_test.go | 232 ++ .../src/github.com/olivere/elastic/sort.go | 480 +++ .../github.com/olivere/elastic/sort_test.go | 214 ++ .../src/github.com/olivere/elastic/suggest.go | 143 + .../olivere/elastic/suggest_field.go | 100 + .../olivere/elastic/suggest_field_test.go | 30 + .../olivere/elastic/suggest_test.go | 131 + .../github.com/olivere/elastic/suggester.go | 15 + .../olivere/elastic/suggester_completion.go | 129 + .../elastic/suggester_completion_fuzzy.go | 179 + .../suggester_completion_fuzzy_test.go | 50 + .../elastic/suggester_completion_test.go | 29 + .../olivere/elastic/suggester_context.go | 11 + .../elastic/suggester_context_category.go | 99 + .../suggester_context_category_test.go | 97 + .../olivere/elastic/suggester_context_geo.go | 132 + .../elastic/suggester_context_geo_test.go | 48 + .../olivere/elastic/suggester_phrase.go | 554 +++ .../olivere/elastic/suggester_phrase_test.go | 169 + .../olivere/elastic/suggester_term.go | 233 ++ .../olivere/elastic/suggester_term_test.go | 29 + .../github.com/olivere/elastic/termvectors.go | 458 +++ .../olivere/elastic/termvectors_test.go | 165 + .../src/github.com/olivere/elastic/update.go | 300 ++ .../github.com/olivere/elastic/update_test.go | 312 ++ .../olivere/elastic/uritemplates/LICENSE | 18 + .../elastic/uritemplates/uritemplates.go | 359 ++ .../olivere/elastic/uritemplates/utils.go | 13 + .../elastic/uritemplates/utils_test.go | 105 + .../olivere/elastic.v3/CHANGELOG-3.0.md | 363 ++ .../olivere/elastic.v3/CONTRIBUTING.md | 40 + .../gopkg.in/olivere/elastic.v3/CONTRIBUTORS | 35 + .../src/gopkg.in/olivere/elastic.v3/LICENSE | 20 + .../src/gopkg.in/olivere/elastic.v3/README.md | 415 +++ .../olivere/elastic.v3/backoff/LICENSE | 22 + .../olivere/elastic.v3/backoff/backoff.go | 159 + .../elastic.v3/backoff/backoff_test.go | 146 + .../olivere/elastic.v3/backoff/retry.go | 53 + .../olivere/elastic.v3/backoff/retry_test.go | 44 + .../src/gopkg.in/olivere/elastic.v3/bulk.go | 314 ++ .../olivere/elastic.v3/bulk_delete_request.go | 112 + .../elastic.v3/bulk_delete_request_test.go | 42 + .../olivere/elastic.v3/bulk_index_request.go | 173 + .../elastic.v3/bulk_index_request_test.go | 63 + .../olivere/elastic.v3/bulk_processor.go | 515 +++ .../olivere/elastic.v3/bulk_processor_test.go | 406 +++
.../olivere/elastic.v3/bulk_request.go | 17 + .../gopkg.in/olivere/elastic.v3/bulk_test.go | 463 +++ .../olivere/elastic.v3/bulk_update_request.go | 219 ++ .../elastic.v3/bulk_update_request_test.go | 77 + .../olivere/elastic.v3/canonicalize.go | 28 + .../olivere/elastic.v3/canonicalize_test.go | 41 + .../olivere/elastic.v3/clear_scroll.go | 102 + .../olivere/elastic.v3/clear_scroll_test.go | 85 + .../src/gopkg.in/olivere/elastic.v3/client.go | 1551 +++++++++ .../olivere/elastic.v3/client_test.go | 899 +++++ .../olivere/elastic.v3/cluster-test/Makefile | 16 + .../olivere/elastic.v3/cluster-test/README.md | 63 + .../elastic.v3/cluster-test/cluster-test.go | 356 ++ .../olivere/elastic.v3/cluster_health.go | 244 ++ .../olivere/elastic.v3/cluster_health_test.go | 109 + .../olivere/elastic.v3/cluster_state.go | 284 ++ .../olivere/elastic.v3/cluster_state_test.go | 92 + .../olivere/elastic.v3/cluster_stats.go | 349 ++ .../olivere/elastic.v3/cluster_stats_test.go | 85 + .../elastic.v3/config/elasticsearch.yml | 103 + .../gopkg.in/olivere/elastic.v3/connection.go | 90 + .../src/gopkg.in/olivere/elastic.v3/count.go | 310 ++ .../gopkg.in/olivere/elastic.v3/count_test.go | 124 + .../gopkg.in/olivere/elastic.v3/decoder.go | 26 + .../olivere/elastic.v3/decoder_test.go | 49 + .../src/gopkg.in/olivere/elastic.v3/delete.go | 214 ++ .../olivere/elastic.v3/delete_by_query.go | 302 ++ .../elastic.v3/delete_by_query_test.go | 114 + .../olivere/elastic.v3/delete_template.go | 118 + .../elastic.v3/delete_template_test.go | 22 + .../olivere/elastic.v3/delete_test.go | 118 + .../src/gopkg.in/olivere/elastic.v3/doc.go | 51 + .../src/gopkg.in/olivere/elastic.v3/errors.go | 141 + .../olivere/elastic.v3/errors_test.go | 202 ++ .../olivere/elastic.v3/example_test.go | 547 +++ .../src/gopkg.in/olivere/elastic.v3/exists.go | 175 + .../olivere/elastic.v3/exists_test.go | 52 + .../gopkg.in/olivere/elastic.v3/explain.go | 330 ++ .../olivere/elastic.v3/explain_test.go | 41 + .../elastic.v3/fetch_source_context.go | 74 + .../elastic.v3/fetch_source_context_test.go | 125 + .../gopkg.in/olivere/elastic.v3/geo_point.go | 48 + .../olivere/elastic.v3/geo_point_test.go | 24 + .../src/gopkg.in/olivere/elastic.v3/get.go | 271 ++ .../olivere/elastic.v3/get_template.go | 113 + .../olivere/elastic.v3/get_template_test.go | 51 + .../gopkg.in/olivere/elastic.v3/get_test.go | 165 + .../gopkg.in/olivere/elastic.v3/highlight.go | 455 +++ .../olivere/elastic.v3/highlight_test.go | 192 ++ .../src/gopkg.in/olivere/elastic.v3/index.go | 284 ++ .../gopkg.in/olivere/elastic.v3/index_test.go | 279 ++ .../olivere/elastic.v3/indices_close.go | 153 + .../olivere/elastic.v3/indices_close_test.go | 81 + .../olivere/elastic.v3/indices_create.go | 129 + .../olivere/elastic.v3/indices_create_test.go | 60 + .../olivere/elastic.v3/indices_delete.go | 129 + .../elastic.v3/indices_delete_template.go | 122 + .../olivere/elastic.v3/indices_delete_test.go | 20 + .../elastic.v3/indices_delete_warmer.go | 131 + .../elastic.v3/indices_delete_warmer_test.go | 48 + .../olivere/elastic.v3/indices_exists.go | 149 + .../elastic.v3/indices_exists_template.go | 112 + .../indices_exists_template_test.go | 68 + .../olivere/elastic.v3/indices_exists_test.go | 20 + .../olivere/elastic.v3/indices_exists_type.go | 161 + .../elastic.v3/indices_exists_type_test.go | 134 + .../olivere/elastic.v3/indices_flush.go | 169 + .../olivere/elastic.v3/indices_flush_test.go | 69 + .../olivere/elastic.v3/indices_forcemerge.go | 200 ++ .../elastic.v3/indices_forcemerge_test.go | 56 +
.../olivere/elastic.v3/indices_get.go | 202 ++ .../olivere/elastic.v3/indices_get_aliases.go | 155 + .../elastic.v3/indices_get_aliases_test.go | 146 + .../olivere/elastic.v3/indices_get_mapping.go | 170 + .../elastic.v3/indices_get_mapping_test.go | 50 + .../elastic.v3/indices_get_settings.go | 183 + .../elastic.v3/indices_get_settings_test.go | 81 + .../elastic.v3/indices_get_template.go | 128 + .../elastic.v3/indices_get_template_test.go | 41 + .../olivere/elastic.v3/indices_get_test.go | 97 + .../olivere/elastic.v3/indices_get_warmer.go | 194 ++ .../elastic.v3/indices_get_warmer_test.go | 83 + .../olivere/elastic.v3/indices_open.go | 157 + .../olivere/elastic.v3/indices_open_test.go | 20 + .../olivere/elastic.v3/indices_put_alias.go | 111 + .../elastic.v3/indices_put_alias_test.go | 123 + .../olivere/elastic.v3/indices_put_mapping.go | 221 ++ .../elastic.v3/indices_put_mapping_test.go | 82 + .../elastic.v3/indices_put_settings.go | 184 + .../elastic.v3/indices_put_settings_test.go | 92 + .../elastic.v3/indices_put_template.go | 179 + .../olivere/elastic.v3/indices_put_warmer.go | 222 ++ .../elastic.v3/indices_put_warmer_test.go | 100 + .../olivere/elastic.v3/indices_refresh.go | 94 + .../elastic.v3/indices_refresh_test.go | 47 + .../olivere/elastic.v3/indices_stats.go | 385 +++ .../olivere/elastic.v3/indices_stats_test.go | 85 + .../gopkg.in/olivere/elastic.v3/inner_hit.go | 160 + .../olivere/elastic.v3/inner_hit_test.go | 44 + .../src/gopkg.in/olivere/elastic.v3/logger.go | 10 + .../src/gopkg.in/olivere/elastic.v3/mget.go | 219 ++ .../gopkg.in/olivere/elastic.v3/mget_test.go | 95 + .../gopkg.in/olivere/elastic.v3/msearch.go | 96 + .../olivere/elastic.v3/msearch_test.go | 197 ++ .../gopkg.in/olivere/elastic.v3/nodes_info.go | 318 ++ .../olivere/elastic.v3/nodes_info_test.go | 40 + .../gopkg.in/olivere/elastic.v3/optimize.go | 130 + .../olivere/elastic.v3/optimize_test.go | 47 + .../gopkg.in/olivere/elastic.v3/percolate.go | 309 ++ .../olivere/elastic.v3/percolate_test.go | 92 + .../src/gopkg.in/olivere/elastic.v3/ping.go | 126 + .../gopkg.in/olivere/elastic.v3/ping_test.go | 64 + .../gopkg.in/olivere/elastic.v3/plugins.go | 38 + .../olivere/elastic.v3/plugins_test.go | 32 + .../src/gopkg.in/olivere/elastic.v3/query.go | 13 + .../gopkg.in/olivere/elastic.v3/reindexer.go | 270 ++ .../olivere/elastic.v3/reindexer_test.go | 285 ++ .../gopkg.in/olivere/elastic.v3/request.go | 123 + .../gopkg.in/olivere/elastic.v3/rescore.go | 44 + .../gopkg.in/olivere/elastic.v3/rescorer.go | 64 + .../gopkg.in/olivere/elastic.v3/response.go | 43 + .../src/gopkg.in/olivere/elastic.v3/scan.go | 359 ++ .../gopkg.in/olivere/elastic.v3/scan_test.go | 559 +++ .../src/gopkg.in/olivere/elastic.v3/script.go | 131 + .../olivere/elastic.v3/script_test.go | 78 + .../src/gopkg.in/olivere/elastic.v3/scroll.go | 208 ++ .../olivere/elastic.v3/scroll_test.go | 106 + .../src/gopkg.in/olivere/elastic.v3/search.go | 429 +++ .../olivere/elastic.v3/search_aggs.go | 1270 +++++++ .../elastic.v3/search_aggs_bucket_children.go | 76 + .../search_aggs_bucket_children_test.go | 46 + .../search_aggs_bucket_date_histogram.go | 285 ++ .../search_aggs_bucket_date_histogram_test.go | 49 + .../search_aggs_bucket_date_range.go | 234 ++ .../search_aggs_bucket_date_range_test.go | 130 + .../elastic.v3/search_aggs_bucket_filter.go | 77 + .../search_aggs_bucket_filter_test.go | 66 + .../elastic.v3/search_aggs_bucket_filters.go | 96 + .../search_aggs_bucket_filters_test.go | 68 + .../search_aggs_bucket_geo_distance.go | 194 ++ 
.../search_aggs_bucket_geo_distance_test.go | 71 + .../elastic.v3/search_aggs_bucket_global.go | 71 + .../search_aggs_bucket_global_test.go | 44 + .../search_aggs_bucket_histogram.go | 253 ++ .../search_aggs_bucket_histogram_test.go | 61 + .../elastic.v3/search_aggs_bucket_missing.go | 81 + .../search_aggs_bucket_missing_test.go | 44 + .../elastic.v3/search_aggs_bucket_nested.go | 82 + .../search_aggs_bucket_nested_test.go | 62 + .../elastic.v3/search_aggs_bucket_range.go | 232 ++ .../search_aggs_bucket_range_test.go | 156 + .../elastic.v3/search_aggs_bucket_sampler.go | 145 + .../search_aggs_bucket_sampler_test.go | 52 + .../search_aggs_bucket_significant_terms.go | 141 + ...arch_aggs_bucket_significant_terms_test.go | 86 + .../elastic.v3/search_aggs_bucket_terms.go | 341 ++ .../search_aggs_bucket_terms_test.go | 104 + .../elastic.v3/search_aggs_metrics_avg.go | 101 + .../search_aggs_metrics_avg_test.go | 61 + .../search_aggs_metrics_cardinality.go | 120 + .../search_aggs_metrics_cardinality_test.go | 78 + .../search_aggs_metrics_extended_stats.go | 99 + ...search_aggs_metrics_extended_stats_test.go | 44 + .../search_aggs_metrics_geo_bounds.go | 105 + .../search_aggs_metrics_geo_bounds_test.go | 61 + .../elastic.v3/search_aggs_metrics_max.go | 99 + .../search_aggs_metrics_max_test.go | 61 + .../elastic.v3/search_aggs_metrics_min.go | 100 + .../search_aggs_metrics_min_test.go | 61 + .../search_aggs_metrics_percentile_ranks.go | 131 + ...arch_aggs_metrics_percentile_ranks_test.go | 78 + .../search_aggs_metrics_percentiles.go | 130 + .../search_aggs_metrics_percentiles_test.go | 78 + .../elastic.v3/search_aggs_metrics_stats.go | 99 + .../search_aggs_metrics_stats_test.go | 61 + .../elastic.v3/search_aggs_metrics_sum.go | 99 + .../search_aggs_metrics_sum_test.go | 61 + .../search_aggs_metrics_top_hits.go | 143 + .../search_aggs_metrics_top_hits_test.go | 31 + .../search_aggs_metrics_value_count.go | 102 + .../search_aggs_metrics_value_count_test.go | 63 + .../search_aggs_pipeline_avg_bucket.go | 113 + .../search_aggs_pipeline_avg_bucket_test.go | 27 + .../search_aggs_pipeline_bucket_script.go | 132 + ...search_aggs_pipeline_bucket_script_test.go | 30 + .../search_aggs_pipeline_bucket_selector.go | 134 + ...arch_aggs_pipeline_bucket_selector_test.go | 29 + .../search_aggs_pipeline_cumulative_sum.go | 90 + ...earch_aggs_pipeline_cumulative_sum_test.go | 27 + .../search_aggs_pipeline_derivative.go | 124 + .../search_aggs_pipeline_derivative_test.go | 27 + .../search_aggs_pipeline_max_bucket.go | 114 + .../search_aggs_pipeline_max_bucket_test.go | 27 + .../search_aggs_pipeline_min_bucket.go | 114 + .../search_aggs_pipeline_min_bucket_test.go | 27 + .../search_aggs_pipeline_mov_avg.go | 393 +++ .../search_aggs_pipeline_mov_avg_test.go | 132 + .../search_aggs_pipeline_serial_diff.go | 124 + .../search_aggs_pipeline_serial_diff_test.go | 27 + .../search_aggs_pipeline_sum_bucket.go | 113 + .../search_aggs_pipeline_sum_bucket_test.go | 27 + .../elastic.v3/search_aggs_pipeline_test.go | 1000 ++++++ .../olivere/elastic.v3/search_aggs_test.go | 2996 +++++++++++++++++ .../olivere/elastic.v3/search_queries_bool.go | 212 ++ .../elastic.v3/search_queries_bool_test.go | 34 + .../elastic.v3/search_queries_boosting.go | 97 + .../search_queries_boosting_test.go | 30 + .../elastic.v3/search_queries_common_terms.go | 146 + .../search_queries_common_terms_test.go | 84 + .../search_queries_constant_score.go | 59 + .../search_queries_constant_score_test.go | 27 + .../elastic.v3/search_queries_dis_max.go | 104 + 
.../elastic.v3/search_queries_dis_max_test.go | 28 + .../elastic.v3/search_queries_exists.go | 49 + .../elastic.v3/search_queries_exists_test.go | 27 + .../olivere/elastic.v3/search_queries_fsq.go | 172 + .../search_queries_fsq_score_funcs.go | 567 ++++ .../elastic.v3/search_queries_fsq_test.go | 166 + .../elastic.v3/search_queries_fuzzy.go | 120 + .../elastic.v3/search_queries_fuzzy_test.go | 27 + .../search_queries_geo_bounding_box.go | 121 + .../search_queries_geo_bounding_box_test.go | 63 + .../elastic.v3/search_queries_geo_distance.go | 116 + .../search_queries_geo_distance_test.go | 70 + .../elastic.v3/search_queries_geo_polygon.go | 72 + .../search_queries_geo_polygon_test.go | 58 + .../elastic.v3/search_queries_has_child.go | 129 + .../search_queries_has_child_test.go | 45 + .../elastic.v3/search_queries_has_parent.go | 97 + .../search_queries_has_parent_test.go | 27 + .../olivere/elastic.v3/search_queries_ids.go | 76 + .../elastic.v3/search_queries_ids_test.go | 27 + .../elastic.v3/search_queries_indices.go | 89 + .../elastic.v3/search_queries_indices_test.go | 46 + .../elastic.v3/search_queries_match.go | 214 ++ .../elastic.v3/search_queries_match_all.go | 41 + .../search_queries_match_all_test.go | 44 + .../elastic.v3/search_queries_match_test.go | 78 + .../elastic.v3/search_queries_missing.go | 67 + .../elastic.v3/search_queries_missing_test.go | 44 + .../search_queries_more_like_this.go | 412 +++ .../search_queries_more_like_this_test.go | 91 + .../elastic.v3/search_queries_multi_match.go | 275 ++ .../search_queries_multi_match_test.go | 131 + .../elastic.v3/search_queries_nested.go | 85 + .../elastic.v3/search_queries_nested_test.go | 52 + .../olivere/elastic.v3/search_queries_not.go | 45 + .../elastic.v3/search_queries_not_test.go | 46 + .../elastic.v3/search_queries_prefix.go | 67 + .../elastic.v3/search_queries_prefix_test.go | 45 + .../elastic.v3/search_queries_query_string.go | 349 ++ .../search_queries_query_string_test.go | 28 + .../elastic.v3/search_queries_range.go | 145 + .../elastic.v3/search_queries_range_test.go | 68 + .../elastic.v3/search_queries_regexp.go | 82 + .../elastic.v3/search_queries_regexp_test.go | 47 + .../elastic.v3/search_queries_script.go | 51 + .../elastic.v3/search_queries_script_test.go | 45 + .../search_queries_simple_query_string.go | 185 + ...search_queries_simple_query_string_test.go | 86 + .../search_queries_template_query.go | 84 + .../search_queries_template_query_test.go | 65 + .../olivere/elastic.v3/search_queries_term.go | 58 + .../elastic.v3/search_queries_term_test.go | 46 + .../elastic.v3/search_queries_terms.go | 58 + .../elastic.v3/search_queries_terms_test.go | 46 + .../olivere/elastic.v3/search_queries_type.go | 26 + .../elastic.v3/search_queries_type_test.go | 27 + .../elastic.v3/search_queries_wildcard.go | 81 + .../search_queries_wildcard_test.go | 67 + .../olivere/elastic.v3/search_request.go | 153 + .../olivere/elastic.v3/search_request_test.go | 48 + .../olivere/elastic.v3/search_source.go | 511 +++ .../olivere/elastic.v3/search_source_test.go | 238 ++ .../elastic.v3/search_suggester_test.go | 259 ++ .../olivere/elastic.v3/search_template.go | 152 + .../elastic.v3/search_templates_test.go | 98 + .../olivere/elastic.v3/search_test.go | 885 +++++ .../gopkg.in/olivere/elastic.v3/setup_test.go | 232 ++ .../src/gopkg.in/olivere/elastic.v3/sort.go | 480 +++ .../gopkg.in/olivere/elastic.v3/sort_test.go | 214 ++ .../gopkg.in/olivere/elastic.v3/suggest.go | 143 + .../olivere/elastic.v3/suggest_field.go | 100 + 
.../olivere/elastic.v3/suggest_field_test.go | 30 + .../olivere/elastic.v3/suggest_test.go | 131 + .../gopkg.in/olivere/elastic.v3/suggester.go | 15 + .../elastic.v3/suggester_completion.go | 129 + .../elastic.v3/suggester_completion_fuzzy.go | 179 + .../suggester_completion_fuzzy_test.go | 50 + .../elastic.v3/suggester_completion_test.go | 29 + .../olivere/elastic.v3/suggester_context.go | 11 + .../elastic.v3/suggester_context_category.go | 99 + .../suggester_context_category_test.go | 97 + .../elastic.v3/suggester_context_geo.go | 132 + .../elastic.v3/suggester_context_geo_test.go | 48 + .../olivere/elastic.v3/suggester_phrase.go | 554 +++ .../elastic.v3/suggester_phrase_test.go | 169 + .../olivere/elastic.v3/suggester_term.go | 233 ++ .../olivere/elastic.v3/suggester_term_test.go | 29 + .../olivere/elastic.v3/termvectors.go | 458 +++ .../olivere/elastic.v3/termvectors_test.go | 165 + .../src/gopkg.in/olivere/elastic.v3/update.go | 300 ++ .../olivere/elastic.v3/update_test.go | 312 ++ .../olivere/elastic.v3/uritemplates/LICENSE | 18 + .../elastic.v3/uritemplates/uritemplates.go | 359 ++ .../olivere/elastic.v3/uritemplates/utils.go | 13 + .../elastic.v3/uritemplates/utils_test.go | 105 + 735 files changed, 99288 insertions(+), 8605 deletions(-) delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baserequest.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baseresponse.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catresponses.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealth.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealthresponses.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodeshotthreads.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesshutdown.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesstats.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterreroute.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstate.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstatresponses.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterupdatesettings.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection_test.go delete mode 100644 
services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulkudp.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corecount.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredelete.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredeletebyquery.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexample_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexplain.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreget.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreindex.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremget.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremorelikethis.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremsearch.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coretest_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreupdate.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corevalidate.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/error.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesaliases.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesanalyze.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesclearcache.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicescreateindex.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeleteindex.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdoc.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesflush.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesgetsettings.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesindicesexists.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesopencloseindex.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesoptimize.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping.go delete mode 100644 
services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputsettings.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesrefresh.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessegments.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessnapshot.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstats.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstatus.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicestemplates.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesupdatesettings.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchdsl.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchquery.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchreadme delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsort.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/setup_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared_test.go delete mode 100644 services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/snapshot.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/CHANGELOG-3.0.md create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTING.md create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTORS create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/LICENSE create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/README.md create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/LICENSE create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_request.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/canonicalize.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/canonicalize_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/client.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/client_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/Makefile create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/README.md create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/cluster-test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_health.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_health_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_state.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_state_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/config/elasticsearch.yml create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/connection.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/count.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/count_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/decoder.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/decoder_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_template.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_template_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/doc.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/errors.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/errors_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/example_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/exists.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/exists_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/explain.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/explain_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/geo_point.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/geo_point_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/get.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/get_template.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/get_template_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/get_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/highlight.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/highlight_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/index.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/index_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_close.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_close_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_create.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_create_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_template.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_test.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_flush.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_flush_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_open.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_open_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_template.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_stats.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_stats_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/inner_hit.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/inner_hit_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/logger.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/mget.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/mget_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/msearch.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/msearch_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/nodes_info.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/nodes_info_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/optimize.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/optimize_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/percolate.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/percolate_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/ping.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/ping_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/plugins.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/plugins_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/query.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/reindexer.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/reindexer_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/request.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/rescore.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/rescorer.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/response.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/scan.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/scan_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/script.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/script_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/scroll.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/scroll_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters.go 
create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_score_funcs.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices_test.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_request.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_request_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_source.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_source_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_suggester_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_template.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_templates_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/setup_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/sort.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/sort_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggest.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggest_field.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggest_field_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggest_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_term.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_term_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/termvectors.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/termvectors_test.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/update.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/update_test.go create mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/LICENSE create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/uritemplates.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils.go create mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CHANGELOG-3.0.md create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CONTRIBUTING.md create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CONTRIBUTORS create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/LICENSE create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/README.md create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/LICENSE create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/backoff.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/backoff_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/retry.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/retry_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_delete_request.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_delete_request_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_index_request.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_index_request_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_processor.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_processor_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_request.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_update_request.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_update_request_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/canonicalize.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/canonicalize_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/clear_scroll.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/clear_scroll_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/client.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/client_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/Makefile create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/README.md create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/cluster-test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_health.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_health_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_state.go create 
mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_state_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_stats.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_stats_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/config/elasticsearch.yml create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/connection.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/count.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/count_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/decoder.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/decoder_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_by_query.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_by_query_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_template.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_template_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/doc.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/errors.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/errors_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/example_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/exists.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/exists_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/explain.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/explain_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/fetch_source_context.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/fetch_source_context_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/geo_point.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/geo_point_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_template.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_template_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/highlight.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/highlight_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/index.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/index_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_close.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_close_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_create.go create mode 100644 
services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_create_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_template.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_warmer_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_template.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_template_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_type.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_type_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_flush.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_flush_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_forcemerge.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_forcemerge_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_aliases.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_aliases_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_mapping.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_mapping_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_settings.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_settings_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_template.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_template_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_warmer.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_warmer_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_open.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_open_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_alias.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_alias_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_mapping.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_mapping_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_settings.go create mode 100644 
services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_settings_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_template.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_warmer.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_warmer_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_refresh.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_refresh_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_stats.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_stats_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/inner_hit.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/inner_hit_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/logger.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/mget.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/mget_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/msearch.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/msearch_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/nodes_info.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/nodes_info_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/optimize.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/optimize_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/percolate.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/percolate_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/ping.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/ping_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/plugins.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/plugins_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/query.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/reindexer.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/reindexer_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/request.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/rescore.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/rescorer.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/response.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scan.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scan_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/script.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/script_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scroll.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scroll_test.go create mode 100644 
services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg_test.go create mode 100644 
services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go create mode 100644 
services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_bool.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_bool_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_boosting.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_boosting_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_common_terms_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_constant_score_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_dis_max_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_exists.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_exists_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fuzzy_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box_test.go create mode 100644 
services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_distance_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_child.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_child_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_parent_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_ids.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_ids_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_indices.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_indices_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_all.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_all_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_missing.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_missing_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_more_like_this_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_multi_match_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_nested.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_nested_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_not.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_not_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_prefix.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_prefix_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_query_string.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_query_string_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_range.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_range_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_regexp.go create mode 100644 
services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_regexp_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_script.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_script_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_template_query.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_template_query_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_term.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_term_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_terms.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_terms_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_type.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_type_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_wildcard_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_request.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_request_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_source.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_source_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_suggester_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_template.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_templates_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/setup_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/sort.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/sort_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_field.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_field_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_test.go create mode 100644 
services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_category.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_category_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_geo.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_geo_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_phrase.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_phrase_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_term.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_term_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/termvectors.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/termvectors_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/update.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/update_test.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/LICENSE create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/uritemplates.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/utils.go create mode 100644 services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/utils_test.go

diff --git a/services/templeton/vendor/manifest b/services/templeton/vendor/manifest
index 2d643f23b..52120fd92 100644
--- a/services/templeton/vendor/manifest
+++ b/services/templeton/vendor/manifest
@@ -19,19 +19,24 @@
 			"revision": "69552e54d2a9d4c6a2438926a774930f7bc398ec",
 			"branch": "master"
 		},
-		{
-			"importpath": "github.com/mattbaird/elastigo/lib",
-			"repository": "https://github.com/mattbaird/elastigo",
-			"revision": "7dc47d261c9718f93000e2ed2b0b94b13d078e77",
-			"branch": "master",
-			"path": "/lib"
-		},
 		{
 			"importpath": "github.com/olebedev/config",
 			"repository": "https://github.com/olebedev/config",
 			"revision": "e3edea7d68b76222b5118cc2e1cf3825e30abb80",
 			"branch": "master"
 		},
+		{
+			"importpath": "github.com/olivere/elastic",
+			"repository": "https://github.com/olivere/elastic",
+			"revision": "a35245a5e2ecff49265ce16b1650cb8eccc3aea5",
+			"branch": "release-branch.v3"
+		},
+		{
+			"importpath": "gopkg.in/olivere/elastic.v3",
+			"repository": "https://gopkg.in/olivere/elastic.v3",
+			"revision": "6d1954a560f885207ff84c32ed0aaaebf11ec599",
+			"branch": "master"
+		},
 		{
 			"importpath": "gopkg.in/yaml.v2",
 			"repository": "https://gopkg.in/yaml.v2",
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baserequest.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baserequest.go
deleted file mode 100644
index f9a9dcbff..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baserequest.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"time"
-)
-
-func (c *Conn) DoCommand(method string, url string, args map[string]interface{}, data interface{}) ([]byte, error) {
-	var response map[string]interface{}
-	var body []byte
-	var httpStatusCode int
-
-	query, err := Escape(args)
-	if err != nil {
-		return nil, err
-	}
-	req, err := c.NewRequest(method, url, query)
-	if err != nil {
-		return body, err
-	}
-
-	if data != nil {
-		switch v := data.(type) {
-		case string:
-			req.SetBodyString(v)
-		case io.Reader:
-			req.SetBody(v)
-		case []byte:
-			req.SetBodyBytes(v)
-		default:
-			err = req.SetBodyJson(v)
-			if err != nil {
-				return body, err
-			}
-		}
-	}
-
-	// uncomment this to print out the request that hits the wire
-	// (requires net/http/httputil)
-	//reqbuf, err := httputil.DumpRequest(req.Request, true)
-	//log.Println(fmt.Sprintf("\n========= req:\nURL: %s\n%s", req.URL, bytes.NewBuffer(reqbuf).String()))
-
-	// Copy request body for tracer
-	if c.RequestTracer != nil {
-		rbody := ""
-		if req.Body != nil {
-			requestBody, err := ioutil.ReadAll(req.Body)
-			if err != nil {
-				return body, err
-			}
-
-			req.SetBody(bytes.NewReader(requestBody))
-			rbody = string(requestBody)
-		}
-		c.RequestTracer(req.Method, req.URL.String(), rbody)
-	}
-
-	httpStatusCode, body, err = req.Do(&response)
-	if err != nil {
-		return body, err
-	}
-	if httpStatusCode > 304 {
-
-		jsonErr := json.Unmarshal(body, &response)
-		if jsonErr == nil {
-			if res_err, ok := response["error"]; ok {
-				status, _ := response["status"]
-				return body, ESError{time.Now(), fmt.Sprintf("Error [%s] Status [%v]", res_err, status), httpStatusCode}
-			}
-		}
-		return body, jsonErr
-	}
-	return body, nil
-}
-
-// ESError is an error implementation that includes a time, message, and code.
-type ESError struct {
-	When time.Time
-	What string
-	Code int
-}
-
-func (e ESError) Error() string {
-	return fmt.Sprintf("%v: %v [%v]", e.When, e.What, e.Code)
-}
-
-// Exists allows the caller to check for the existence of a document using HEAD
-// This appears to be broken in the current version of elasticsearch 0.19.10, currently
-// returning nothing
-func (c *Conn) Exists(index string, _type string, id string, args map[string]interface{}) (BaseResponse, error) {
-	var response map[string]interface{}
-	var body []byte
-	var url string
-	var retval BaseResponse
-	var httpStatusCode int
-
-	query, err := Escape(args)
-	if err != nil {
-		return retval, err
-	}
-
-	if len(_type) > 0 {
-		url = fmt.Sprintf("/%s/%s/%s", index, _type, id)
-	} else {
-		url = fmt.Sprintf("/%s/%s", index, id)
-	}
-	req, err := c.NewRequest("HEAD", url, query)
-	if err != nil {
-		// some sort of generic error handler
-	}
-	httpStatusCode, body, err = req.Do(&response)
-	if httpStatusCode > 304 {
-		if error, ok := response["error"]; ok {
-			status, _ := response["status"]
-			log.Printf("Error: %v (%v)\n", error, status)
-		}
-	} else {
-		// marshall into json
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			log.Println(jsonErr)
-		}
-	}
-	return retval, err
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baseresponse.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baseresponse.go
deleted file mode 100644
index 41770dfa9..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/baseresponse.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-type BaseResponse struct {
-	Ok      bool             `json:"ok"`
-	Index   string           `json:"_index,omitempty"`
-	Type    string           `json:"_type,omitempty"`
-	Id      string           `json:"_id,omitempty"`
-	Source  *json.RawMessage `json:"_source,omitempty"` // depends on the schema you've defined
-	Version int              `json:"_version,omitempty"`
-	Found   bool             `json:"found,omitempty"`
-	Exists  bool             `json:"exists,omitempty"`
-	Created bool             `json:"created,omitempty"`
-	Matches []string         `json:"matches,omitempty"` // percolate matches
-}
-
-// StatusInt is required because /_optimize, at least, returns its status as
-// strings instead of integers.
-type StatusInt int
-
-func (self *StatusInt) UnmarshalJSON(b []byte) error {
-	s := ""
-	if json.Unmarshal(b, &s) == nil {
-		if i, err := strconv.Atoi(s); err == nil {
-			*self = StatusInt(i)
-			return nil
-		}
-	}
-	i := 0
-	err := json.Unmarshal(b, &i)
-	if err == nil {
-		*self = StatusInt(i)
-	}
-	return err
-}
-
-func (self *StatusInt) MarshalJSON() ([]byte, error) {
-	return json.Marshal(*self)
-}
-
-// StatusBool is required because /_optimize, at least, returns its status as
-// strings instead of booleans.
-type StatusBool bool
-
-func (self *StatusBool) UnmarshalJSON(b []byte) error {
-	s := ""
-	if json.Unmarshal(b, &s) == nil {
-		switch s {
-		case "true":
-			*self = StatusBool(true)
-			return nil
-		case "false":
-			*self = StatusBool(false)
-			return nil
-		default:
-		}
-	}
-	b2 := false
-	err := json.Unmarshal(b, &b2)
-	if err == nil {
-		*self = StatusBool(b2)
-	}
-	return err
-}
-
-func (self *StatusBool) MarshalJSON() ([]byte, error) {
-	return json.Marshal(*self)
-}
-
-type Status struct {
-	Total      StatusInt `json:"total"`
-	Successful StatusInt `json:"successful"`
-	Failed     StatusInt `json:"failed"`
-	Failures   []Failure `json:"failures,omitempty"`
-}
-
-type Failure struct {
-	Index  string    `json:"index"`
-	Shard  StatusInt `json:"shard"`
-	Reason string    `json:"reason"`
-}
-
-func (f Failure) String() string {
-	return fmt.Sprintf("Failed on shard %d on index %s:\n%s", f.Shard, f.Index, f.Reason)
-}
-
-// failures is a convenience type to allow []Failure formated easily in the
-// library
-type failures []Failure
-
-func (f failures) String() string {
-	message := make([]string, len(f))
-	for i, failure := range f {
-		message[i] = failure.String()
-	}
-	return strings.Join(message, "\n")
-}
-
-type ExtendedStatus struct {
-	Ok           StatusBool `json:"ok"`
-	ShardsStatus Status     `json:"_shards"`
-}
-
-type MatchRes struct {
-	Index string `json:"_index"`
-	Id    string `json:"_id"`
-}
-
-type Match struct {
-	OK          bool         `json:"ok"`
-	Matches     []MatchRes   `json:"matches"`
-	Explanation *Explanation `json:"explanation,omitempty"`
-}
-
-type Explanation struct {
-	Value       float32        `json:"value"`
-	Description string         `json:"description"`
-	Details     []*Explanation `json:"details,omitempty"`
-}
-
-func ScrollDuration(duration string) string {
-	scrollString := ""
-	if duration != "" {
-		scrollString = "&scroll=" + duration
-	}
-	return scrollString
-}
-
-// http://www.elasticsearch.org/guide/reference/api/search/search-type/
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo.go
deleted file mode 100644
index 44ca2d80c..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package elastigo
-
-import (
-	"errors"
-	"strconv"
-	"strings"
-)
-
-var ErrInvalidIndexLine = errors.New("Cannot parse indexline")
-
-//Create an IndexInfo from the string _cat/indices would produce
-//EX: health status index pri rep docs.count docs.deleted store.size pri.store.size
-//green open logs-2015-06-19 2 0 135389346 0 53048922233 53048922233
-func NewCatIndexInfo(indexLine string) (catIndex *CatIndexInfo, err error) {
-	split := strings.Fields(indexLine)
-	if len(split) < 5 {
-		return nil, ErrInvalidIndexLine
-	}
-	catIndex = &CatIndexInfo{}
-	catIndex.Store = CatIndexStore{}
-	catIndex.Docs = CatIndexDocs{}
-	catIndex.Health = split[0]
-	catIndex.Status = split[1]
-	catIndex.Name = split[2]
-	catIndex.Shards, err = strconv.Atoi(split[3])
-	if err != nil {
-		catIndex.Shards = 0
-	}
-	catIndex.Replicas, err = strconv.Atoi(split[4])
-	if err != nil {
-		catIndex.Replicas = 0
-	}
-	if len(split) == 5 {
-		return catIndex, nil
-	}
-	catIndex.Docs.Count, err = strconv.ParseInt(split[5], 10, 64)
-	if err != nil {
-		catIndex.Docs.Count = 0
-	}
-	if len(split) == 6 {
-		return catIndex, nil
-	}
-	catIndex.Docs.Deleted, err = strconv.ParseInt(split[6], 10, 64)
-	if err != nil {
-		catIndex.Docs.Deleted = 0
-	}
-	if len(split) == 7 {
-		return catIndex, nil
-	}
-	catIndex.Store.Size, err = strconv.ParseInt(split[7], 10, 64)
-	if err != nil {
-		catIndex.Store.Size = 0
-	}
-	if len(split) == 8 {
-		return catIndex, nil
-	}
-	catIndex.Store.PriSize, err = strconv.ParseInt(split[8], 10, 64)
-	if err != nil {
-		catIndex.Store.PriSize = 0
-	}
-	return catIndex, nil
-}
-
-// Pull all the index info from the connection
-func (c *Conn) GetCatIndexInfo(pattern string) (catIndices []CatIndexInfo) {
-	catIndices = make([]CatIndexInfo, 0)
-	// force it to only show the fields we know about
-	args := map[string]interface{}{"bytes": "b", "h": "health,status,index,pri,rep,docs.count,docs.deleted,store.size,pri.store.size"}
-	indices, err := c.DoCommand("GET", "/_cat/indices/"+pattern, args, nil)
-	if err == nil {
-		indexLines := strings.Split(string(indices[:]), "\n")
-		for _, index := range indexLines {
-			ci, _ := NewCatIndexInfo(index)
-			if nil != ci {
-				catIndices = append(catIndices, *ci)
-			}
-		}
-	}
-	return catIndices
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo_test.go
deleted file mode 100644
index d09f6cd92..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catindexinfo_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package elastigo
-
-import (
-	. "github.com/smartystreets/goconvey/convey"
-	"testing"
-)
-
-func TestCatIndexInfo(t *testing.T) {
-	Convey("Create index line from a broken index listing", t, func() {
-		_, err := NewCatIndexInfo("red ")
-		So(err, ShouldNotBeNil)
-	})
-	Convey("catIndex Create index line from a bad shards index listing", t, func() {
-		i, err := NewCatIndexInfo("green open logs-2015-06-19 2 1 135389346 20 53048922233 53048922233")
-		So(err, ShouldBeNil)
-		So(i.Health, ShouldEqual, "green")
-		So(i.Status, ShouldEqual, "open")
-		So(i.Name, ShouldEqual, "logs-2015-06-19")
-		So(i.Shards, ShouldEqual, 2)
-		So(i.Replicas, ShouldEqual, 1)
-		So(i.Docs.Count, ShouldEqual, 135389346)
-		So(i.Docs.Deleted, ShouldEqual, 20)
-		So(i.Store.Size, ShouldEqual, 53048922233)
-		So(i.Store.PriSize, ShouldEqual, 53048922233)
-	})
-	Convey("catIndex Create index line from a bad replicas index listing", t, func() {
-		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 0 1234 3 11000 13000")
-		So(err, ShouldBeNil)
-		So(i.Health, ShouldEqual, "red")
-		So(i.Status, ShouldEqual, "open")
-		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
-		So(i.Shards, ShouldEqual, 2)
-		So(i.Replicas, ShouldEqual, 0)
-		So(i.Docs.Count, ShouldEqual, 1234)
-		So(i.Docs.Deleted, ShouldEqual, 3)
-		So(i.Store.Size, ShouldEqual, 11000)
-		So(i.Store.PriSize, ShouldEqual, 13000)
-	})
-	Convey("catIndex Create index line from a complete index listing", t, func() {
-		i, err := NewCatIndexInfo("red closed foo-2000-01-01-bar 2 1 1234 3 11000 13000")
-		So(err, ShouldBeNil)
-		So(i.Status, ShouldEqual, "closed")
-		So(i.Health, ShouldEqual, "red")
-		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
-		So(i.Shards, ShouldEqual, 2)
-		So(i.Replicas, ShouldEqual, 1)
-		So(i.Docs.Count, ShouldEqual, 1234)
-		So(i.Docs.Deleted, ShouldEqual, 3)
-		So(i.Store.Size, ShouldEqual, 11000)
-		So(i.Store.PriSize, ShouldEqual, 13000)
-	})
-	Convey("catIndex Create index line from a bad docs index listing", t, func() {
-		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 a 3 11000 13000")
-		So(err, ShouldBeNil)
-		So(i.Health, ShouldEqual, "red")
-		So(i.Status, ShouldEqual, "open")
-		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
-		So(i.Shards, ShouldEqual, 2)
-		So(i.Replicas, ShouldEqual, 1)
-		So(i.Docs.Count, ShouldEqual, 0)
-		So(i.Docs.Deleted, ShouldEqual, 3)
-		So(i.Store.Size, ShouldEqual, 11000)
-		So(i.Store.PriSize, ShouldEqual, 13000)
-	})
-	Convey("catIndex Create index line from a bad deletes index listing", t, func() {
-		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234 a 11000 13000")
-		So(err, ShouldBeNil)
-		So(i.Health, ShouldEqual, "red")
-		So(i.Status, ShouldEqual, "open")
-		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
-		So(i.Shards, ShouldEqual, 2)
-		So(i.Replicas, ShouldEqual, 1)
-		So(i.Docs.Count, ShouldEqual, 1234)
-		So(i.Docs.Deleted, ShouldEqual, 0)
-		So(i.Store.Size, ShouldEqual, 11000)
-		So(i.Store.PriSize, ShouldEqual, 13000)
-	})
-	Convey("catIndex Create index line from a kinda short index listing", t, func() {
-		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234")
-		So(err, ShouldBeNil)
-		So(i.Health, ShouldEqual, "red")
-		So(i.Status, ShouldEqual, "open")
-		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
-		So(i.Shards, ShouldEqual, 2)
-		So(i.Replicas, ShouldEqual, 1)
-		So(i.Docs.Count, ShouldEqual, 1234)
-		So(i.Docs.Deleted, ShouldEqual, 0)
-		So(i.Store.Size, ShouldEqual, 0)
-		So(i.Store.PriSize, ShouldEqual, 0)
-	})
-	Convey("catIndex Create index line from a kinda sorta short index listing", t, func() {
-		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234 3")
-		So(err, ShouldBeNil)
-		So(i.Health, ShouldEqual, "red")
-		So(i.Status, ShouldEqual, "open")
-		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
-		So(i.Shards, ShouldEqual, 2)
-		So(i.Replicas, ShouldEqual, 1)
-		So(i.Docs.Count, ShouldEqual, 1234)
-		So(i.Docs.Deleted, ShouldEqual, 3)
-		So(i.Store.Size, ShouldEqual, 0)
-		So(i.Store.PriSize, ShouldEqual, 0)
-	})
-	Convey("catIndex Create index line from a short index listing", t, func() {
-		i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1")
-		So(err, ShouldBeNil)
-		So(i.Health, ShouldEqual, "red")
-		So(i.Status, ShouldEqual, "open")
-		So(i.Name, ShouldEqual, "foo-2000-01-01-bar")
-		So(i.Shards, ShouldEqual, 2)
-		So(i.Replicas, ShouldEqual, 1)
-		So(i.Docs.Count, ShouldEqual, 0)
-		So(i.Docs.Deleted, ShouldEqual, 0)
-		So(i.Store.Size, ShouldEqual, 0)
-		So(i.Store.PriSize, ShouldEqual, 0)
-	})
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo.go
deleted file mode 100644
index cee737748..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package elastigo
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-
-
-// newCatNodeInfo returns an instance of CatNodeInfo populated with the
-// information in the cat output indexLine which contains the
-// specified fields. An err is returned if a field is not known.
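-// For example (illustrative only, not part of the original sources): given
-// fields []string{"host", "ip"} and the line "es-node-1 10.0.0.5", the
-// returned CatNodeInfo has Host == "es-node-1" and IP == "10.0.0.5".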
-func newCatNodeInfo(fields []string, indexLine string) (catNode *CatNodeInfo, err error) {
-
-	split := strings.Fields(indexLine)
-	catNode = &CatNodeInfo{}
-
-	// Check the fields length compared to the number of stats
-	lf, ls := len(fields), len(split)
-	if lf > ls {
-		return nil, fmt.Errorf("Number of fields (%d) greater than number of stats (%d)", lf, ls)
-	}
-
-	// Populate the appropriate field in CatNodeInfo
-	for i, field := range fields {
-
-		switch field {
-		case "id", "nodeId":
-			catNode.Id = split[i]
-		case "pid", "p":
-			catNode.PID = split[i]
-		case "host", "h":
-			catNode.Host = split[i]
-		case "ip", "i":
-			catNode.IP = split[i]
-		case "port", "po":
-			catNode.Port = split[i]
-		case "version", "v":
-			catNode.Version = split[i]
-		case "build", "b":
-			catNode.Build = split[i]
-		case "jdk", "j":
-			catNode.JDK = split[i]
-		case "disk.avail", "d", "disk", "diskAvail":
-			catNode.DiskAvail = split[i]
-		case "heap.current", "hc", "heapCurrent":
-			catNode.HeapCur = split[i]
-		case "heap.percent", "hp", "heapPercent":
-			catNode.HeapPerc = split[i]
-		case "heap.max", "hm", "heapMax":
-			catNode.HeapMax = split[i]
-		case "ram.current", "rc", "ramCurrent":
-			catNode.RamCur = split[i]
-		case "ram.percent", "rp", "ramPercent":
-			val, err := strconv.Atoi(split[i])
-			if err != nil {
-				return nil, err
-			}
-			catNode.RamPerc = int16(val)
-		case "ram.max", "rm", "ramMax":
-			catNode.RamMax = split[i]
-		case "file_desc.current", "fdc", "fileDescriptorCurrent":
-			catNode.FileDescCur = split[i]
-		case "file_desc.percent", "fdp", "fileDescriptorPercent":
-			catNode.FileDescPerc = split[i]
-		case "file_desc.max", "fdm", "fileDescriptorMax":
-			catNode.FileDescMax = split[i]
-		case "load", "l":
-			catNode.Load = split[i]
-		case "uptime", "u":
-			catNode.UpTime = split[i]
-		case "node.role", "r", "role", "dc", "nodeRole":
-			catNode.NodeRole = split[i]
-		case "master", "m":
-			catNode.Master = split[i]
-		case "name", "n":
-			catNode.Name = strings.Join(split[i:], " ")
-		case "completion.size", "cs", "completionSize":
-			catNode.CmpltSize = split[i]
-		case "fielddata.memory_size", "fm", "fielddataMemory":
-			val, err := strconv.Atoi(split[i])
-			if err != nil {
-				return nil, err
-			}
-			catNode.FieldMem = val
-		case "fielddata.evictions", "fe", "fieldataEvictions":
-			val, err := strconv.Atoi(split[i])
-			if err != nil {
-				return nil, err
-			}
-			catNode.FieldEvict = val
-		case "filter_cache.memory_size", "fcm", "filterCacheMemory":
-			val, err := strconv.Atoi(split[i])
-			if err != nil {
-				return nil, err
-			}
-			catNode.FiltMem = val
-		case "filter_cache.evictions", "fce", "filterCacheEvictions":
-			val, err := strconv.Atoi(split[i])
-			if err != nil {
-				return nil, err
-			}
-			catNode.FiltEvict = val
-		case "flush.total", "ft", "flushTotal":
-			val, err := strconv.Atoi(split[i])
-			if err != nil {
-				return nil, err
-			}
-			catNode.FlushTotal = val
-		case "flush.total_time", "ftt", "flushTotalTime":
-			catNode.FlushTotalTime = split[i]
-		case "get.current", "gc", "getCurrent":
-			catNode.GetCur = split[i]
-		case "get.time", "gti", "getTime":
-			catNode.GetTime = split[i]
-		case "get.total", "gto", "getTotal":
-			catNode.GetTotal = split[i]
-		case "get.exists_time", "geti", "getExistsTime":
-			catNode.GetExistsTime = split[i]
-		case "get.exists_total", "geto", "getExistsTotal":
-			catNode.GetExistsTotal = split[i]
-		case "get.missing_time", "gmti", "getMissingTime":
-			catNode.GetMissingTime = split[i]
-		case "get.missing_total", "gmto", "getMissingTotal":
-			catNode.GetMissingTotal = split[i]
-		case "id_cache.memory_size", "im", 
"idCacheMemory": - val, err := strconv.Atoi(split[i]) - if err != nil { - return nil, err - } - catNode.IDCacheMemory = val - case "indexing.delete_current", "idc", "indexingDeleteCurrent": - catNode.IdxDelCur = split[i] - case "indexing.delete_time", "idti", "indexingDeleteime": - catNode.IdxDelTime = split[i] - case "indexing.delete_total", "idto", "indexingDeleteTotal": - catNode.IdxDelTotal = split[i] - case "indexing.index_current", "iic", "indexingIndexCurrent": - catNode.IdxIdxCur = split[i] - case "indexing.index_time", "iiti", "indexingIndexTime": - catNode.IdxIdxTime = split[i] - case "indexing.index_total", "iito", "indexingIndexTotal": - catNode.IdxIdxTotal = split[i] - case "merges.current", "mc", "mergesCurrent": - catNode.MergCur = split[i] - case "merges.current_docs", "mcd", "mergesCurrentDocs": - catNode.MergCurDocs = split[i] - case "merges.current_size", "mcs", "mergesCurrentSize": - catNode.MergCurSize = split[i] - case "merges.total", "mt", "mergesTotal": - catNode.MergTotal = split[i] - case "merges.total_docs", "mtd", "mergesTotalDocs": - catNode.MergTotalDocs = split[i] - case "merges.total_size", "mts", "mergesTotalSize": - catNode.MergTotalSize = split[i] - case "merges.total_time", "mtt", "mergesTotalTime": - catNode.MergTotalTime = split[i] - case "percolate.current", "pc", "percolateCurrent": - catNode.PercCur = split[i] - case "percolate.memory_size", "pm", "percolateMemory": - catNode.PercMem = split[i] - case "percolate.queries", "pq", "percolateQueries": - catNode.PercQueries = split[i] - case "percolate.time", "pti", "percolateTime": - catNode.PercTime = split[i] - case "percolate.total", "pto", "percolateTotal": - catNode.PercTotal = split[i] - case "refesh.total", "rto", "refreshTotal": - catNode.RefreshTotal = split[i] - case "refresh.time", "rti", "refreshTime": - catNode.RefreshTime = split[i] - case "search.fetch_current", "sfc", "searchFetchCurrent": - catNode.SearchFetchCur = split[i] - case "search.fetch_time", "sfti", "searchFetchTime": - catNode.SearchFetchTime = split[i] - case "search.fetch_total", "sfto", "searchFetchTotal": - catNode.SearchFetchTotal = split[i] - case "search.open_contexts", "so", "searchOpenContexts": - catNode.SearchOpenContexts = split[i] - case "search.query_current", "sqc", "searchQueryCurrent": - catNode.SearchQueryCur = split[i] - case "search.query_time", "sqti", "searchQueryTime": - catNode.SearchQueryTime = split[i] - case "search.query_total", "sqto", "searchQueryTotal": - catNode.SearchQueryTotal = split[i] - case "segments.count", "sc", "segmentsCount": - catNode.SegCount = split[i] - case "segments.memory", "sm", "segmentsMemory": - catNode.SegMem = split[i] - case "segments.index_writer_memory", "siwm", "segmentsIndexWriterMemory": - catNode.SegIdxWriterMem = split[i] - case "segments.index_writer_max_memory", "siwmx", "segmentsIndexWriterMaxMemory": - catNode.SegIdxWriterMax = split[i] - case "segments.version_map_memory", "svmm", "segmentsVersionMapMemory": - catNode.SegVerMapMem = split[i] - default: - return nil, fmt.Errorf("Invalid cat nodes field: %s", field) - } - } - - return catNode, nil -} - -// GetCatNodeInfo issues an elasticsearch cat nodes request with the specified -// fields and returns a list of CatNodeInfos, one for each node, whose requested -// members are populated with statistics. If fields is nil or empty, the default -// cat output is used. 
-// NOTE: if you include the name field, make sure it is the last field in the
-// list, because name values can contain spaces which screw up the parsing
-func (c *Conn) GetCatNodeInfo(fields []string) (catNodes []CatNodeInfo, err error) {
-
-	catNodes = make([]CatNodeInfo, 0)
-
-	// If no fields have been specified, use the "default" arrangement
-	if len(fields) < 1 {
-		fields = []string{"host", "ip", "heap.percent", "ram.percent", "load",
-			"node.role", "master", "name"}
-	}
-
-	// Issue a request for stats on the requested fields
-	args := map[string]interface{}{
-		"bytes": "b",
-		"h":     strings.Join(fields, ","),
-	}
-	indices, err := c.DoCommand("GET", "/_cat/nodes/", args, nil)
-	if err != nil {
-		return catNodes, err
-	}
-
-	// Create a CatNodeInfo for each line in the response
-	indexLines := strings.Split(string(indices[:]), "\n")
-	for _, index := range indexLines {
-
-		// Ignore empty output lines
-		if len(index) < 1 {
-			continue
-		}
-
-		// Create a CatNodeInfo and append it to the result
-		info, err := newCatNodeInfo(fields, index)
-		if info != nil {
-			catNodes = append(catNodes, *info)
-		} else if err != nil {
-			return catNodes, err
-		}
-	}
-	return catNodes, nil
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go
deleted file mode 100644
index 441de2442..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package elastigo
-
-import (
-	. "github.com/smartystreets/goconvey/convey"
-	"testing"
-)
-
-func TestCatNode(t *testing.T) {
-
-	c := NewTestConn()
-
-	Convey("Basic cat nodes", t, func() {
-
-		fields := []string{"fm", "fe", "fcm", "fce", "ft", "ftt", "im", "rp", "n"}
-		catNodes, err := c.GetCatNodeInfo(fields)
-
-		So(err, ShouldBeNil)
-		So(catNodes, ShouldNotBeNil)
-		So(len(catNodes), ShouldBeGreaterThan, 0)
-
-		for _, catNode := range catNodes {
-			So(catNode.FieldMem, ShouldNotBeEmpty)
-			So(catNode.FiltMem, ShouldNotBeEmpty)
-			So(catNode.IDCacheMemory, ShouldNotBeEmpty)
-			So(catNode.RamPerc, ShouldNotBeEmpty)
-			So(catNode.Name, ShouldNotBeEmpty)
-		}
-	})
-
-	Convey("Cat nodes with default arguments", t, func() {
-
-		fields := []string{}
-		catNodes, err := c.GetCatNodeInfo(fields)
-
-		So(err, ShouldBeNil)
-		So(catNodes, ShouldNotBeNil)
-		So(len(catNodes), ShouldBeGreaterThan, 0)
-
-		for _, catNode := range catNodes {
-			So(catNode.Host, ShouldNotBeEmpty)
-			So(catNode.IP, ShouldNotBeEmpty)
-			So(catNode.NodeRole, ShouldNotBeEmpty)
-			So(catNode.Name, ShouldNotBeEmpty)
-		}
-	})
-
-	Convey("Invalid field error behavior", t, func() {
-
-		fields := []string{"fm", "bogus"}
-		catNodes, err := c.GetCatNodeInfo(fields)
-
-		So(err, ShouldNotBeNil)
-
-		for _, catNode := range catNodes {
-			So(catNode.FieldMem, ShouldNotBeEmpty)
-		}
-	})
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catresponses.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catresponses.go
deleted file mode 100644
index 17129549f..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catresponses.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package elastigo
-
-type CatIndexInfo struct {
-	Health   string
-	Status   string
-	Name     string
-	Shards   int
-	Replicas int
-	Docs     CatIndexDocs
-	Store    CatIndexStore
-}
-
-type CatIndexDocs struct {
-	Count   int64
-	Deleted int64
-}
-
-type CatIndexStore struct {
-	Size    int64
-	PriSize int64
-}
-
-type 
CatShardInfo struct { - IndexName string - Shard int - Primary string - State string - Docs int64 - Store int64 - NodeIP string - NodeName string -} - -type CatNodeInfo struct { - Id string - PID string - Host string - IP string - Port string - Version string - Build string - JDK string - DiskAvail string - HeapCur string - HeapPerc string - HeapMax string - RamCur string - RamPerc int16 - RamMax string - FileDescCur string - FileDescPerc string - FileDescMax string - Load string - UpTime string - NodeRole string - Master string - Name string - CmpltSize string - FieldMem int - FieldEvict int - FiltMem int - FiltEvict int - FlushTotal int - FlushTotalTime string - GetCur string - GetTime string - GetTotal string - GetExistsTime string - GetExistsTotal string - GetMissingTime string - GetMissingTotal string - IDCacheMemory int - IdxDelCur string - IdxDelTime string - IdxDelTotal string - IdxIdxCur string - IdxIdxTime string - IdxIdxTotal string - MergCur string - MergCurDocs string - MergCurSize string - MergTotal string - MergTotalDocs string - MergTotalSize string - MergTotalTime string - PercCur string - PercMem string - PercQueries string - PercTime string - PercTotal string - RefreshTotal string - RefreshTime string - SearchFetchCur string - SearchFetchTime string - SearchFetchTotal string - SearchOpenContexts string - SearchQueryCur string - SearchQueryTime string - SearchQueryTotal string - SegCount string - SegMem string - SegIdxWriterMem string - SegIdxWriterMax string - SegVerMapMem string -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo.go deleted file mode 100644 index c93366b94..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo.go +++ /dev/null @@ -1,106 +0,0 @@ -package elastigo - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -type CatShards []CatShardInfo - -// Stringify the shards -func (s *CatShards) String() string { - var buffer bytes.Buffer - - if s != nil { - for _, cs := range *s { - buffer.WriteString(fmt.Sprintf("%v\n", cs)) - } - } - return buffer.String() -} - -var ErrInvalidShardLine = errors.New("Cannot parse shardline") - -// Create a CatShard from a line of the raw output of a _cat/shards -func NewCatShardInfo(rawCat string) (catshard *CatShardInfo, err error) { - - split := strings.Fields(rawCat) - if len(split) < 4 { - return nil, ErrInvalidShardLine - } - catshard = &CatShardInfo{} - catshard.IndexName = split[0] - catshard.Shard, err = strconv.Atoi(split[1]) - if err != nil { - catshard.Shard = -1 - } - catshard.Primary = split[2] - catshard.State = split[3] - if len(split) == 4 { - return catshard, nil - } - - catshard.Docs, err = strconv.ParseInt(split[4], 10, 64) - if err != nil { - catshard.Docs = 0 - } - if len(split) == 5 { - return catshard, nil - } - catshard.Store, err = strconv.ParseInt(split[5], 10, 64) - if err != nil { - catshard.Store = 0 - } - if len(split) == 6 { - return catshard, nil - } - catshard.NodeIP = split[6] - if len(split) == 7 { - return catshard, nil - } - catshard.NodeName = split[7] - if len(split) > 8 { - loop: - for i, moreName := range split { - if i > 7 { - if moreName == "->" { - break loop - } - catshard.NodeName += " " - catshard.NodeName += moreName - } - } - } - - return catshard, nil -} - -// Print shard info -func (s *CatShardInfo) String() string { - if s == nil { - return ":::::::" - } - return 
fmt.Sprintf("%v:%v:%v:%v:%v:%v:%v:%v", s.IndexName, s.Shard, s.Primary, - s.State, s.Docs, s.Store, s.NodeIP, s.NodeName) -} - -// Get all the shards, even the bad ones -func (c *Conn) GetCatShards() (shards CatShards) { - shards = make(CatShards, 0) - //force it to only respond with the columns we know about and in a forced order - args := map[string]interface{}{"bytes": "b", "h": "index,shard,prirep,state,docs,store,ip,node"} - s, err := c.DoCommand("GET", "/_cat/shards", args, nil) - if err == nil { - catShardLines := strings.Split(string(s[:]), "\n") - for _, shardLine := range catShardLines { - shard, _ := NewCatShardInfo(shardLine) - if nil != shard { - shards = append(shards, *shard) - } - } - } - return shards -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo_test.go deleted file mode 100644 index dd6aaaa4f..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/catshardinfo_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package elastigo - -import ( - . "github.com/smartystreets/goconvey/convey" - "testing" -) - -func TestCatShardInfo(t *testing.T) { - Convey("Create cat shard from started shard", t, func() { - c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1 Ultra Man") - So(err, ShouldBeNil) - So(c, ShouldNotBeNil) - So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") - So(c.Primary, ShouldEqual, "p") - So(c.State, ShouldEqual, "STARTED") - So(c.Docs, ShouldEqual, 1234) - So(c.Store, ShouldEqual, 121) - So(c.NodeIP, ShouldEqual, "127.0.0.1") - So(c.NodeName, ShouldEqual, "Ultra Man") - - }) - Convey("Create cat shard from realocating shard", t, func() { - c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p RELOCATING 1234 121 127.0.0.1 Ultra Man -> 10.0.0.1 Super Man") - So(err, ShouldBeNil) - So(c, ShouldNotBeNil) - So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") - So(c.Primary, ShouldEqual, "p") - So(c.State, ShouldEqual, "RELOCATING") - So(c.Docs, ShouldEqual, 1234) - So(c.Store, ShouldEqual, 121) - So(c.NodeIP, ShouldEqual, "127.0.0.1") - So(c.NodeName, ShouldEqual, "Ultra Man") - }) - Convey("Create cat shard from unallocated shard", t, func() { - c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p UNASSIGNED") - So(err, ShouldBeNil) - So(c, ShouldNotBeNil) - So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") - So(c.Primary, ShouldEqual, "p") - So(c.State, ShouldEqual, "UNASSIGNED") - So(c.Docs, ShouldEqual, 0) - So(c.Store, ShouldEqual, 0) - So(c.NodeIP, ShouldEqual, "") - So(c.NodeName, ShouldEqual, "") - }) - Convey("Create cat shard from invalid shard", t, func() { - c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p") - So(err, ShouldEqual, ErrInvalidShardLine) - So(c, ShouldBeNil) - }) - Convey("Create cat shard from garbled shard", t, func() { - c, err := NewCatShardInfo("foo-2000-01-01-bar a p STARTED abc 121 127.0.0.1 Ultra Man") - So(err, ShouldBeNil) - So(c, ShouldNotBeNil) - So(c.Shard, ShouldEqual, -1) - So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") - So(c.Primary, ShouldEqual, "p") - So(c.State, ShouldEqual, "STARTED") - So(c.Docs, ShouldEqual, 0) - So(c.Store, ShouldEqual, 121) - So(c.NodeIP, ShouldEqual, "127.0.0.1") - So(c.NodeName, ShouldEqual, "Ultra Man") - }) - Convey("Print cat shard from started shard", t, func() { - c, _ := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1 Ultra Man") - s := c.String() - So(s, ShouldContainSubstring, "foo-2000-01-01-bar:") - 
So(s, ShouldContainSubstring, ":Ultra Man") - c = nil - s = c.String() - So(s, ShouldEqual, ":::::::") - }) - Convey("Print cat shard from short shard", t, func() { - c, _ := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234") - s := c.String() - So(s, ShouldContainSubstring, "foo-2000-01-01-bar:0:p:STARTED:1234") - c, _ = NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121") - s = c.String() - So(s, ShouldContainSubstring, "oo-2000-01-01-bar:0:p:STARTED:1234:121") - c, _ = NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1") - s = c.String() - So(s, ShouldContainSubstring, "oo-2000-01-01-bar:0:p:STARTED:1234:121:127.0.0.1") - }) - -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealth.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealth.go deleted file mode 100644 index ee06210ba..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealth.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package elastigo - -import ( - "encoding/json" - "fmt" - "strings" -) - -// The cluster health API allows to get a very simple status on the health of the cluster. -// see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-health.html -// TODO: implement wait_for_status, timeout, wait_for_relocating_shards, wait_for_nodes -// TODO: implement level (Can be one of cluster, indices or shards. Controls the details level of the health -// information returned. Defaults to cluster.) 
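-// Example (illustrative; assumes c is a connected *Conn):
-//
-//	health, err := c.Health("logs-2015-06-19")
-//	if err == nil {
-//		fmt.Println(health.Status) // "green", "yellow", or "red"
-//	}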
-func (c *Conn) Health(indices ...string) (ClusterHealthResponse, error) {
-	var url string
-	var retval ClusterHealthResponse
-	if len(indices) > 0 {
-		url = fmt.Sprintf("/_cluster/health/%s", strings.Join(indices, ","))
-	} else {
-		url = "/_cluster/health"
-	}
-	body, err := c.DoCommand("GET", url, nil, nil)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		// marshall into json
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-	return retval, err
-}
-
-func (c *Conn) WaitForStatus(status string, timeout int, indices ...string) (ClusterHealthResponse, error) {
-	var url string
-	var retval ClusterHealthResponse
-	if len(indices) > 0 {
-		url = fmt.Sprintf("/_cluster/health/%s", strings.Join(indices, ","))
-	} else {
-		url = "/_cluster/health"
-	}
-
-	body, err := c.DoCommand("GET", url, map[string]interface{}{
-		"wait_for_status": status,
-		"timeout":         timeout,
-	}, nil)
-
-	if err != nil {
-		return retval, err
-	}
-
-	if err == nil {
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-	return retval, err
-}
-
-type ClusterStateFilter struct {
-	FilterNodes        bool
-	FilterRoutingTable bool
-	FilterMetadata     bool
-	FilterBlocks       bool
-	FilterIndices      []string
-}
-
-func (f ClusterStateFilter) Parameterize() []string {
-	var parts []string
-
-	if f.FilterNodes {
-		parts = append(parts, "filter_nodes=true")
-	}
-
-	if f.FilterRoutingTable {
-		parts = append(parts, "filter_routing_table=true")
-	}
-
-	if f.FilterMetadata {
-		parts = append(parts, "filter_metadata=true")
-	}
-
-	if f.FilterBlocks {
-		parts = append(parts, "filter_blocks=true")
-	}
-
-	if f.FilterIndices != nil && len(f.FilterIndices) > 0 {
-		parts = append(parts, strings.Join([]string{"filter_indices=", strings.Join(f.FilterIndices, ",")}, ""))
-	}
-
-	return parts
-}
-
-func (c *Conn) ClusterState(filter ClusterStateFilter) (ClusterStateResponse, error) {
-	var parameters []string
-	var url string
-	var retval ClusterStateResponse
-
-	parameters = filter.Parameterize()
-
-	url = fmt.Sprintf("/_cluster/state?%s", strings.Join(parameters, "&"))
-
-	body, err := c.DoCommand("GET", url, nil, nil)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		// marshall into json
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-	return retval, err
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealthresponses.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealthresponses.go
deleted file mode 100644
index 80a53f5e0..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterhealthresponses.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package elastigo
-
-type ClusterHealthResponse struct {
-	ClusterName         string `json:"cluster_name"`
-	Status              string `json:"status"`
-	TimedOut            bool   `json:"timed_out"`
-	NumberOfNodes       int    `json:"number_of_nodes"`
-	NumberOfDataNodes   int    `json:"number_of_data_nodes"`
-	ActivePrimaryShards int    `json:"active_primary_shards"`
-	ActiveShards        int    `json:"active_shards"`
-	RelocatingShards    int    `json:"relocating_shards"`
-	InitializingShards  int    `json:"initializing_shards"`
-	UnassignedShards    int    `json:"unassigned_shards"`
-}
-
-type ClusterStateResponse struct {
-	ClusterName string                              `json:"cluster_name"`
-	MasterNode  string                              `json:"master_node"`
-	Nodes       map[string]ClusterStateNodeResponse `json:"nodes"`
-	Metadata    ClusterStateMetadataResponse        `json:"metadata"`
-	// TODO: 
Routing Table - // TODO: Routing Nodes - // TODO: Allocations - -} - -type ClusterStateNodeResponse struct { - Name string `json:"name"` - TransportAddress string `json:"transport_address"` - // TODO: Attributes -} - -type ClusterStateMetadataResponse struct { - // TODO: templates - Indices map[string]ClusterStateIndiceResponse `json:"indices"` -} - -type ClusterStateIndiceResponse struct { - State string `json:"state"` -} - -type ClusterStateRoutingTableResponse struct { - // TODO: unassigned - // -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodeshotthreads.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodeshotthreads.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodeshotthreads.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo.go deleted file mode 100644 index 767c6d35e..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package elastigo - -import ( - "encoding/json" - "fmt" - "strings" -) - -// The cluster nodes info API allows to retrieve one or more (or all) of the cluster nodes information. 
-// information can be one of jvm, process -func (c *Conn) AllNodesInfo() (NodeInfo, error) { - return c.NodesInfo([]string{"_all"}, "_all") -} - -func (c *Conn) NodesInfo(information []string, nodes ...string) (NodeInfo, error) { - var url string - var retval NodeInfo - url = fmt.Sprintf("/_nodes/%s/%s", strings.Join(nodes, ","), strings.Join(information, ",")) - body, err := c.DoCommand("GET", url, nil, nil) - if err != nil { - return retval, err - } - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - return retval, err -} - -type NodeInfo struct { - ClusterName string `json:"cluster_name"` - Nodes map[string]Node `json:"nodes"` // node name is random string -} - -type Node struct { - Name string `json:"name,omitempty"` - TransportAddress string `json:"transport_address,omitempty"` - Host string `json:"host,omitempty"` - Ip string `json:"ip,omitempty"` - Version string `json:"version,omitempty"` - Build string `json:"build,omitempty"` - Hostname string `json:"hostname,omitempty"` - HttpAddress string `json:"http_address,omitempty"` - Settings *Settings `json:"settings,omitempty"` - OS *OS `json:"os,omitempty"` - Process *Process `json:"process,omitempty"` - JVM *JVM `json:"jvm,omitempty"` - ThreadPool *ThreadPool `json:"thread_pool,omitempty"` - Network *Network `json:"network,omitempty"` - Transport *Transport `json:"transport,omitempty"` - Http *Http `json:"http,omitempty"` - Plugins []*Plugin `json:"plugins,omitempty"` -} - -type Settings struct { - Path *Path `json:"path,omitempty"` - Foreground string `json:"foreground,omitempty"` - Name string `json:"name,omitempty"` -} - -type Path struct { - Logs string `json:"logs,omitempty"` - home string `json:"home,omitempty"` -} - -type Cluster struct { - Name string `json:"name"` -} - -type OS struct { - RefreshInterval int `json:"refresh_interval,omitempty"` - AvailableProcessors int `json:"available_processors,omitempty"` - CPU *CPU `json:"cpu,omitempty"` -} - -type CPU struct { - Vendor string `json:"vendor,omitempty"` - Model string `json:"model,omitempty"` - Mhz int `json:"mhz,omitempty"` - TotalCores int `json:"total_cores,omitempty"` - TotalSockets int `json:"total_sockets,omitempty"` - CoresPerSocket int `json:"cores_per_socket,omitempty"` - CacheSizeInBytes int `json:"cache_size_in_bytes,omitempty"` -} - -type MEM struct { - TotalInBytes int `json:"total_in_bytes,omitempty"` -} - -type SWAP struct { - TotalInBytes int `json:"total_in_bytes,omitempty"` -} - -type Process struct { - RefreshInterval int `json:"refresh_interval,omitempty"` - Id int `json:"id,omitempty"` - MaxFileDescriptors int `json:"max_file_descriptors,omitempty"` - Mlockall bool `json:"mlockall,omitempty"` -} - -type JVM struct { - Pid int `json:"pid,omitempty"` - Version string `json:"version,omitempty"` - VMName string `json:"vm_name,omitempty"` - VMVersion string `json:"vm_version,omitempty"` - VMVendor string `json:"vm_vendor,omitempty"` - StartTime int `json:"start_time,omitempty"` - Mem *JvmMem `json:"mem,omitempty"` - GcCollectors []string `json:"gc_collectors,omitempty"` - MemoryPools []string `json:"memory_pools,omitempty"` -} - -type JvmMem struct { - HeapInitInBytes int `json:"heap_init_in_bytes,omitempty"` - HeapMaxInBytes int `json:"heap_max_in_bytes,omitempty"` - NonHeapInitInBytes int `json:"non_heap_init_in_bytes,omitempty"` - NonHeapMaxInBytes int `json:"non_heap_max_in_bytes,omitempty"` - DirectMaxInBytes int `json:"direct_max_in_bytes,omitempty"` -} - -type ThreadPool struct { - 
Generic *ThreadPoolConfig `json:"generic,omitempty"` - Index *ThreadPoolConfig `json:"index,omitempty"` - Get *ThreadPoolConfig `json:"get,omitempty"` - Snapshot *ThreadPoolConfig `json:"snapshot,omitempty"` - Merge *ThreadPoolConfig `json:"merge,omitempty"` - Suggest *ThreadPoolConfig `json:"suggest,omitempty"` - Bulk *ThreadPoolConfig `json:"bulk,omitempty"` - Optimize *ThreadPoolConfig `json:"optimize,omitempty"` - Warmer *ThreadPoolConfig `json:"warmer,omitempty"` - Flush *ThreadPoolConfig `json:"flush,omitempty"` - Search *ThreadPoolConfig `json:"search,omitempty"` - Percolate *ThreadPoolConfig `json:"percolate,omitempty"` - Management *ThreadPoolConfig `json:"management,omitempty"` - Refresh *ThreadPoolConfig `json:"refresh,omitempty"` -} - -type ThreadPoolConfig struct { - Type string `json:"type,omitempty"` - Min int `json:"min,omitempty"` - Max int `json:"max,omitempty"` - QueueSize interface{} `json:"queue_size,omitempty"` // Either string or -1 - KeepAlive string `json:"keep_alive,omitempty"` -} - -type Network struct { - RefreshInterval int `json:"refresh_interval,omitempty"` - PrimaryInterface *Interface `json:"primary_interface,omitempty"` -} - -type Interface struct { - Address string `json:"address,omitempty"` - Name string `json:"name,omitempty"` - MacAddress string `json:"mac_address,omitempty"` -} - -type Transport struct { - BoundAddress string `json:"bound_address,omitempty"` - PublishAddress string `json:"publish_address,omitempty"` -} - -type Http struct { - BoundAddress string `json:"bound_address,omitempty"` - PublishAddress string `json:"publish_address,omitempty"` -} - -type Plugin struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Site bool `json:"site,omitempty"` - Jvm bool `json:"jvm,omitempty"` - Url string `json:"url,omitempty"` -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go deleted file mode 100644 index 5ee6b5d7b..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "fmt" - "github.com/bmizerany/assert" - "testing" -) - -func TestGetAll(t *testing.T) { - InitTests(true) - c := NewTestConn() - nodesInfo, err := c.AllNodesInfo() - assert.T(t, err == nil, fmt.Sprintf("should not have gotten error, received: %v", err)) - assert.T(t, nodesInfo.ClusterName != "", fmt.Sprintf("clustername should have been not empty. 
received: %q", nodesInfo.ClusterName)) - for _, node := range nodesInfo.Nodes { - assert.T(t, node.Settings != nil, fmt.Sprintf("Settings should not have been null")) - assert.T(t, node.OS != nil, fmt.Sprintf("OS should not have been null")) - assert.T(t, node.Process != nil, fmt.Sprintf("Process should not have been null")) - assert.T(t, node.JVM != nil, fmt.Sprintf("JVM should not have been null")) - assert.T(t, node.ThreadPool != nil, fmt.Sprintf("ThreadPool should not have been null")) - assert.T(t, node.Network != nil, fmt.Sprintf("Network should not have been null")) - assert.T(t, node.Transport != nil, fmt.Sprintf("Transport should not have been null")) - assert.T(t, node.Http != nil, fmt.Sprintf("Http should not have been null")) - assert.T(t, node.Plugins != nil, fmt.Sprintf("Plugins should not have been null")) - } -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesshutdown.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesshutdown.go deleted file mode 100644 index dacb47e1c..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesshutdown.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "fmt" - "net/url" - "strconv" - "strings" -) - -// NodesShutdown allows the caller to shutdown between one and all nodes in the cluster -// delay is a integer representing number of seconds -// passing "" or "_all" for the nodes parameter will shut down all nodes -// see http://www.elasticsearch.org/guide/reference/api/admin-cluster-nodes-shutdown/ -func (c *Conn) NodesShutdown(delay int, nodes ...string) error { - shutdownUrl := fmt.Sprintf("/_cluster/nodes/%s/_shutdown", strings.Join(nodes, ",")) - if delay > 0 { - var values url.Values = url.Values{} - values.Add("delay", strconv.Itoa(delay)) - shutdownUrl += "?" + values.Encode() - } - _, err := c.DoCommand("POST", shutdownUrl, nil, nil) - if err != nil { - return err - } - return nil -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesstats.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesstats.go deleted file mode 100644 index f4bf12bb0..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusternodesstats.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2015 Niels Freier -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package elastigo
-
-import (
-	"encoding/json"
-)
-
-func (c *Conn) NodesStats() (NodeStatsResponse, error) {
-	var retval NodeStatsResponse
-
-	body, err := c.DoCommand("GET", "/_nodes/stats", nil, nil)
-	if err != nil {
-		return retval, err
-	}
-	// marshall into json
-	jsonErr := json.Unmarshal(body, &retval)
-	if jsonErr != nil {
-		return retval, jsonErr
-	}
-	return retval, err
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterreroute.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterreroute.go
deleted file mode 100644
index 00c6aa942..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterreroute.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-)
-
-// Reroute allows to explicitly execute a cluster reroute allocation command.
-// see http://www.elasticsearch.org/guide/reference/api/admin-cluster-reroute/
-func (c *Conn) Reroute(dryRun bool, commands Commands) (ClusterHealthResponse, error) {
-	var url string
-	var retval ClusterHealthResponse
-
-	if len(commands.Commands) > 0 {
-		url = fmt.Sprintf("/_cluster/reroute?%s", dryRunOption(dryRun))
-	} else {
-		return retval, errors.New("Must pass at least one command")
-	}
-	m := map[string]interface{}{"commands": commands.Commands}
-	body, err := c.DoCommand("POST", url, m, nil)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		// marshall into json
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-	return retval, err
-}
-
-func dryRunOption(isDryRun bool) string {
-	if isDryRun {
-		return "dry_run"
-	}
-	return ""
-}
-
-// supported commands are
-// move (index, shard, from_node, to_node)
-// cancel (index, shard, node, allow_primary)
-// allocate (index, shard, node, allow_primary)
-
-type Commands struct {
-	Commands []interface{} `json:"commands"`
-}
-
-type MoveCommand struct {
-	Index    string `json:"index"`
-	Shard    string `json:"shard"`
-	FromNode string `json:"from_node"`
-	ToNode   string `json:"to_node"`
-}
-
-type CancelCommand struct {
-	Index        string `json:"index"`
-	Shard        string `json:"shard"`
-	Node         string `json:"node"`
-	AllowPrimary bool   `json:"allow_primary,omitempty"`
-}
-type AllocateCommand struct {
-	Index        string `json:"index"`
-	Shard        string `json:"shard"`
-	Node         string `json:"node"`
-	AllowPrimary bool   `json:"allow_primary,omitempty"`
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstate.go
deleted file mode 100644
index d0e2a76eb..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstate.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" -) - -// State gets the comprehensive state information for the whole cluster -// see http://www.elasticsearch.org/guide/reference/api/admin-cluster-state/ -func (c *Conn) UpdateSetting(args map[string]interface{}, filter_indices ...string) (ClusterStateResponse, error) { - var url string - var retval ClusterStateResponse - - url = "/_cluster/state" - - body, err := c.DoCommand("GET", url, args, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstatresponses.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstatresponses.go deleted file mode 100644 index e9a296686..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterstatresponses.go +++ /dev/null @@ -1,299 +0,0 @@ -package elastigo - -type NodeStatsResponse struct { - ClusterName string `json:"cluster_name"` - Nodes map[string]NodeStatsNodeResponse -} - -type NodeStatsNodeResponse struct { - Name string `json:"name"` - Timestamp int64 `json:"timestamp"` - TransportAddress string `json:"transport_address"` - Hostname string `json:"hostname"` - Host string `json:"host"` - IP []string `json:"ip"` - Attributes NodeStatsNodeAttributes `json:"attributes"` - Indices NodeStatsIndicesResponse `json:"indices"` - OS NodeStatsOSResponse `json:"os"` - Process NodeStatsProcessResponse `json:"process"` - JVM NodeStatsJVMResponse `json:"jvm"` - Network NodeStatsNetworkResponse `json:"network"` - FS NodeStatsFSResponse `json:"fs"` - ThreadPool map[string]NodeStatsThreadPoolPoolResponse `json:"thread_pool"` - Transport NodeStatsTransportResponse `json:"transport"` - FieldDataBreaker NodeStatsFieldDataBreakerResponse `json:"fielddata_breaker"` -} - -type NodeStatsNodeAttributes struct { - Data string `json:"data"` - Client string `json:"client"` -} -type NodeStatsNetworkResponse struct { - TCP NodeStatsTCPResponse `json:"tcp"` -} - -type NodeStatsFieldDataBreakerResponse struct { - MaximumSizeInBytes int64 `json:"maximum_size_in_bytes"` - MaximumSize string `json:"maximum_size"` - EstimatedSizeInBytes int64 `json:"estimated_size_in_bytes"` - EstimatedSize string `json:"estimated_size"` - Overhead float64 `json:"overhead"` - Tripped int64 `json:"tripped"` -} -type NodeStatsTransportResponse struct { - ServerOpen int64 `json:"server_open"` - RxCount int64 `json:"rx_count"` - RxSize int64 `json:"rx_size_in_bytes"` - TxCount int64 `json:"tx_count"` - TxSize int64 `json:"tx_size_in_bytes"` -} - -type NodeStatsThreadPoolPoolResponse struct { - Threads int64 `json:"threads"` - Queue int64 `json:"queue"` - Active int64 `json:"active"` - Rejected int64 `json:"rejected"` - Largest int64 `json:"largest"` - Completed int64 `json:"completed"` -} - -type NodeStatsTCPResponse struct { - ActiveOpens int64 `json:"active_opens"` - 
PassiveOpens int64 `json:"passive_opens"` - CurrEstab int64 `json:"curr_estab"` - InSegs int64 `json:"in_segs"` - OutSegs int64 `json:"out_segs"` - RetransSegs int64 `json:"retrans_segs"` - EstabResets int64 `json:"estab_resets"` - AttemptFails int64 `json:"attempt_fails"` - InErrs int64 `json:"in_errs"` - OutRsts int64 `json:"out_rsts"` -} - -type NodeStatsIndicesResponse struct { - Docs NodeStatsIndicesDocsResponse `json:"docs"` - Store NodeStatsIndicesStoreResponse `json:"store"` - Indexing NodeStatsIndicesIndexingResponse `json:"indexing"` - Get NodeStatsIndicesGetResponse `json:"get"` - Search NodeStatsIndicesSearchResponse `json:"search"` - Merges NodeStatsIndicesMergesResponse `json:"merges"` - Refresh NodeStatsIndicesRefreshResponse `json:"refresh"` - Flush NodeStatsIndicesFlushResponse `json:"flush"` - Warmer NodeStatsIndicesWarmerResponse `json:"warmer"` - FilterCache NodeStatsIndicesFilterCacheResponse `json:"filter_cache"` - IdCache NodeStatsIndicesIdCacheResponse `json:"id_cache"` - FieldData NodeStatsIndicesFieldDataResponse `json:"fielddata"` - Percolate NodeStatsIndicesPercolateResponse `json:"percolate"` - Completion NodeStatsIndicesCompletionResponse `json:"completion"` - Segments NodeStatsIndicesSegmentsResponse `json:"segments"` - Translog NodeStatsIndicesTranslogResponse `json:"translog"` - Suggest NodeStatsIndicesSuggestResponse `json:"suggest"` -} - -type NodeStatsIndicesDocsResponse struct { - Count int64 `json:"count"` - Deleted int64 `json:"deleted"` -} - -type NodeStatsIndicesStoreResponse struct { - Size int64 `json:"size_in_bytes"` - ThrottleTime int64 `json:"throttle_time_in_millis"` -} - -type NodeStatsIndicesIndexingResponse struct { - IndexTotal int64 `json:"index_total"` - IndexTime int64 `json:"index_time_in_millis"` - IndexCurrent int64 `json:"index_current"` - DeleteTotal int64 `json:"delete_total"` - DeleteTime int64 `json:"delete_time_in_millis"` - DeleteCurrent int64 `json:"delete_current"` -} - -type NodeStatsIndicesGetResponse struct { - Total int64 `json:"total"` - Time int64 `json:"time_in_millis"` - ExistsTotal int64 `json:"exists_total"` - ExistsTime int64 `json:"exists_time_in_millis"` - MissingTotal int64 `json:"missing_total"` - MissingTime int64 `json:"missing_time_in_millis"` - Current int64 `json:"current"` -} - -type NodeStatsIndicesSearchResponse struct { - OpenContext int64 `json:"open_contexts"` - QueryTotal int64 `json:"query_total"` - QueryTime int64 `json:"query_time_in_millis"` - QueryCurrent int64 `json:"query_current"` - FetchTotal int64 `json:"fetch_total"` - FetchTime int64 `json:"fetch_time_in_millis"` - FetchCurrent int64 `json:"fetch_current"` -} -type NodeStatsIndicesMergesResponse struct { - Current int64 `json:"current"` - CurrentDocs int64 `json:"current_docs"` - CurrentSizeInBytes int64 `json:"current_size_in_bytes"` - Total int64 `json:"total"` - TotalTimeInMs int64 `json:"total_time_in_millis"` - TotalDocs int64 `json:"total_docs"` - TotalSizeInBytes int64 `json:"total_size_in_bytes"` -} -type NodeStatsIndicesRefreshResponse struct { - Total int64 `json:"total"` - TotalTimeInMs int64 `json:"total_time_in_millis"` -} -type NodeStatsIndicesFlushResponse struct { - Total int64 `json:"total"` - TotalTimeInMs int64 `json:"total_time_in_millis"` -} -type NodeStatsIndicesWarmerResponse struct { - Current int64 `json:"current"` - Total int64 `json:"total"` - TotalTimeInMs int64 `json:"total_time_in_millis"` -} -type NodeStatsIndicesFilterCacheResponse struct { - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - 
Evictions int64 `json:"evictions"` -} -type NodeStatsIndicesIdCacheResponse struct { - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` -} -type NodeStatsIndicesFieldDataResponse struct { - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - Evictions int64 `json:"evictions"` -} -type NodeStatsIndicesPercolateResponse struct { - Total int64 `json:"total"` - TimeInMs int64 `json:"time_in_millis"` - Current int64 `json:"current"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - MemorySize string `json:"memory_size"` - Queries int64 `json:"queries"` -} -type NodeStatsIndicesCompletionResponse struct { - SizeInBytes int64 `json:"size_in_bytes"` -} -type NodeStatsIndicesSegmentsResponse struct { - Count int64 `json:"count"` - MemoryInBytes int64 `json:"memory_in_bytes"` - IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` - VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` -} -type NodeStatsIndicesTranslogResponse struct { - Operations int64 `json:"operations"` - SizeInBytes int64 `json:"size_in_bytes"` -} -type NodeStatsIndicesSuggestResponse struct { - Total int64 `json:"total"` - TimeInMs int64 `json:"time_in_millis"` - Current int64 `json:"current"` -} -type NodeStatsOSResponse struct { - Timestamp int64 `json:"timestamp"` - Uptime int64 `json:"uptime_in_millis"` - LoadAvg []float64 `json:"load_average"` - CPU NodeStatsOSCPUResponse `json:"cpu"` - Mem NodeStatsOSMemResponse `json:"mem"` - Swap NodeStatsOSSwapResponse `json:"swap"` -} - -type NodeStatsOSMemResponse struct { - Free int64 `json:"free_in_bytes"` - Used int64 `json:"used_in_bytes"` - ActualFree int64 `json:"actual_free_in_bytes"` - ActualUsed int64 `json:"actual_used_in_bytes"` -} - -type NodeStatsOSSwapResponse struct { - Used int64 `json:"used_in_bytes"` - Free int64 `json:"free_in_bytes"` -} - -type NodeStatsOSCPUResponse struct { - Sys int64 `json:"sys"` - User int64 `json:"user"` - Idle int64 `json:"idle"` - Steal int64 `json:"stolen"` -} - -type NodeStatsProcessResponse struct { - Timestamp int64 `json:"timestamp"` - OpenFD int64 `json:"open_file_descriptors"` - CPU NodeStatsProcessCPUResponse `json:"cpu"` - Memory NodeStatsProcessMemResponse `json:"mem"` -} - -type NodeStatsProcessMemResponse struct { - Resident int64 `json:"resident_in_bytes"` - Share int64 `json:"share_in_bytes"` - TotalVirtual int64 `json:"total_virtual_in_bytes"` -} - -type NodeStatsProcessCPUResponse struct { - Percent int64 `json:"percent"` - Sys int64 `json:"sys_in_millis"` - User int64 `json:"user_in_millis"` - Total int64 `json:"total_in_millis"` -} - -type NodeStatsJVMResponse struct { - Timestame int64 `json:"timestamp"` - UptimeInMs int64 `json:"uptime_in_millis"` - Mem NodeStatsJVMMemResponse `json:"mem"` - Threads NodeStatsJVMThreadsResponse `json:"threads"` - GC NodeStatsJVMGCResponse `json:"gc"` - BufferPools map[string]NodeStatsJVMBufferPoolsResponse `json:"buffer_pools"` -} - -type NodeStatsJVMMemResponse struct { - HeapUsedInBytes int64 `json:"heap_used_in_bytes"` - HeapUsedPercent int64 `json:"heap_used_percent"` - HeapCommitedInBytes int64 `json:"heap_commited_in_bytes"` - HeapMaxInBytes int64 `json:"heap_max_in_bytes"` - NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"` - NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"` - Pools map[string]NodeStatsJVMMemPoolsResponse `json:"pools"` -} -type NodeStatsJVMMemPoolsResponse struct { - UsedInBytes int64 `json:"used_in_bytes"` - MaxInBytes int64 `json:"max_in_bytes"` - PeakUsedInBytes int64 
`json:"peak_used_in_bytes"` - PeakMaxInBytes int64 `json:"peak_max_in_bytes"` -} -type NodeStatsJVMThreadsResponse struct { - Count int64 `json:"count"` - PeakCount int64 `json:"peak_count"` -} -type NodeStatsJVMGCResponse struct { - Collectors map[string]NodeStatsJVMGCCollectorsAgeResponse `json:"collectors"` -} -type NodeStatsJVMGCCollectorsAgeResponse struct { - Count int64 `json:"collection_count"` - TimeInMs int64 `json:"collection_time_in_millis"` -} -type NodeStatsJVMBufferPoolsResponse struct { - Count int64 `json:"count"` - UsedInBytes int64 `json:"used_in_bytes"` - TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"` -} -type NodeStatsHTTPResponse struct { - CurrentOpen int64 `json:"current_open"` - TotalOpen int64 `json:"total_open"` -} - -type NodeStatsFSResponse struct { - Timestamp int64 `json:"timestamp"` - Data []NodeStatsFSDataResponse `json:"data"` -} - -type NodeStatsFSDataResponse struct { - Path string `json:"path"` - Mount string `json:"mount"` - Device string `json:"dev"` - Total int64 `json:"total_in_bytes"` - Free int64 `json:"free_in_bytes"` - Available int64 `json:"available_in_bytes"` - DiskReads int64 `json:"disk_reads"` - DiskWrites int64 `json:"disk_writes"` - DiskReadSize int64 `json:"disk_read_size_in_bytes"` - DiskWriteSize int64 `json:"disk_write_size_in_bytes"` -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterupdatesettings.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterupdatesettings.go deleted file mode 100644 index f0efa1661..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/clusterupdatesettings.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" -) - -// UpdateSettings allows to update cluster wide specific settings. Defaults to Transient setting -// Settings updated can either be persistent (applied cross restarts) or transient (will not survive a full cluster restart). 
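-// Example (illustrative; the setting key and value are hypothetical):
-//
-//	resp, err := c.UpdateSettings("transient", "indices.recovery.max_bytes_per_sec", 104857600)
-//	if err == nil {
-//		fmt.Println(resp.Transient)
-//	}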
-// http://www.elasticsearch.org/guide/reference/api/admin-cluster-update-settings.html
-func (c *Conn) UpdateSettings(settingType string, key string, value int) (ClusterSettingsResponse, error) {
-	var retval ClusterSettingsResponse
-	if settingType != "transient" && settingType != "persistent" {
-		return retval, fmt.Errorf("settingType must be one of transient or persistent, you passed %s", settingType)
-	}
-	var url string = "/_cluster/settings"
-	m := map[string]map[string]int{settingType: map[string]int{key: value}}
-	body, err := c.DoCommand("PUT", url, nil, m)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		// marshall into json
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-	return retval, err
-}
-
-type ClusterSettingsResponse struct {
-	Transient  map[string]int `json:"transient"`
-	Persistent map[string]int `json:"persistent"`
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection.go
deleted file mode 100644
index 895eb4fcb..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"errors"
-	"fmt"
-	hostpool "github.com/bitly/go-hostpool"
-	"net/http"
-	"net/url"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-)
-
-const (
-	Version         = "0.0.2"
-	DefaultProtocol = "http"
-	DefaultDomain   = "localhost"
-	DefaultPort     = "9200"
-	// A decay duration of zero results in the default behaviour
-	DefaultDecayDuration = 0
-)
-
-type Conn struct {
-	// Maintain these for backwards compatibility
-	Protocol       string
-	Domain         string
-	ClusterDomains []string
-	Port           string
-	Username       string
-	Password       string
-	Hosts          []string
-	RequestTracer  func(method, url, body string)
-	hp             hostpool.HostPool
-	once           sync.Once
-
-	// To compute the weighting scores, we perform a weighted average of recent response times,
-	// over the course of `DecayDuration`. DecayDuration may be set to 0 to use the default
-	// value of 5 minutes. The EpsilonValueCalculator uses this to calculate a score
-	// from the weighted average response time.
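-	// For example (illustrative): a DecayDuration of 10 * time.Minute weights
-	// roughly the last ten minutes of observed response times.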
- DecayDuration time.Duration
-}
-
-func NewConn() *Conn {
- return &Conn{
- // Maintain these for backwards compatibility
- Protocol: DefaultProtocol,
- Domain: DefaultDomain,
- ClusterDomains: []string{DefaultDomain},
- Port: DefaultPort,
- DecayDuration: time.Duration(DefaultDecayDuration * time.Second),
- }
-}
-
-func (c *Conn) SetFromUrl(u string) error {
- if u == "" {
- return errors.New("Url is empty")
- }
-
- parsedUrl, err := url.Parse(u)
- if err != nil {
- return err
- }
-
- c.Protocol = parsedUrl.Scheme
- host, portNum := splitHostnamePartsFromHost(parsedUrl.Host, c.Port)
- c.Port = portNum
- c.Domain = host
-
- if parsedUrl.User != nil {
- c.Username = parsedUrl.User.Username()
- password, passwordIsSet := parsedUrl.User.Password()
- if passwordIsSet {
- c.Password = password
- }
- }
-
- return nil
-}
-
-func (c *Conn) SetPort(port string) {
- c.Port = port
-}
-
-func (c *Conn) SetHosts(newhosts []string) {
-
- // Store the new host list
- c.Hosts = newhosts
-
- // Reinitialise the host pool. Pretty naive, as this will nuke the current
- // hostpool, and therefore reset any scoring
- c.initializeHostPool()
-}
-
-// Set up the host pool to be used
-func (c *Conn) initializeHostPool() {
-
- // If no hosts are set, fall back to defaults
- if len(c.Hosts) == 0 {
- c.Hosts = append(c.Hosts, fmt.Sprintf("%s:%s", c.Domain, c.Port))
- }
-
- // Epsilon Greedy is an algorithm that allows HostPool not only to
- // track failure state, but also to learn about "better" options in
- // terms of speed, and to pick from available hosts based on how well
- // they perform. This gives a weighted request rate to better
- // performing hosts, while still distributing requests to all hosts
- // (proportionate to their performance). The interface is the same as
- // the standard HostPool, but be sure to mark the HostResponse
- // immediately after executing the request to the host, as that will
- // stop the implicitly running request timer.
- //
- // A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132
- if c.hp != nil {
- c.hp.Close()
- }
- c.hp = hostpool.NewEpsilonGreedy(
- c.Hosts, c.DecayDuration, &hostpool.LinearEpsilonValueCalculator{})
-}
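For reference, the epsilon-greedy pool described in the comment above can be exercised directly through the bitly/go-hostpool API this code uses; a minimal sketch (the host names and the doRequest helper are hypothetical):

    pool := hostpool.NewEpsilonGreedy(
        []string{"es1.example.org:9200", "es2.example.org:9200"},
        0, // a zero decay duration selects the library default
        &hostpool.LinearEpsilonValueCalculator{})
    hr := pool.Get()            // pick a host, weighted by recent performance
    err := doRequest(hr.Host()) // hypothetical HTTP call against the chosen host
    hr.Mark(err)                // stop the implicit timer and record the outcome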
-
-func (c *Conn) Close() {
- c.hp.Close()
-}
-
-func (c *Conn) NewRequest(method, path, query string) (*Request, error) {
- // Set up the hostpool on our first run
- c.once.Do(c.initializeHostPool)
-
- // Get a host from the host pool
- hr := c.hp.Get()
-
- // Get the final host and port
- host, portNum := splitHostnamePartsFromHost(hr.Host(), c.Port)
-
- // Build request
- var uri string
- // If query parameters are provided, then add them to the URL,
- // otherwise, leave them out
- if len(query) > 0 {
- uri = fmt.Sprintf("%s://%s:%s%s?%s", c.Protocol, host, portNum, path, query)
- } else {
- uri = fmt.Sprintf("%s://%s:%s%s", c.Protocol, host, portNum, path)
- }
- req, err := http.NewRequest(method, uri, nil)
- if err != nil {
- return nil, err
- }
- req.Header.Add("Accept", "application/json")
- req.Header.Add("User-Agent", "elasticSearch/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
-
- if c.Username != "" || c.Password != "" {
- req.SetBasicAuth(c.Username, c.Password)
- }
-
- newRequest := &Request{
- Request: req,
- hostResponse: hr,
- }
- return newRequest, nil
-}
-
-// Split apart the hostname on colon
-// Return the host and a default port if there is no separator
-func splitHostnamePartsFromHost(fullHost string, defaultPortNum string) (string, string) {
-
- h := strings.Split(fullHost, ":")
-
- if len(h) == 2 {
- return h[0], h[1]
- }
-
- return h[0], defaultPortNum
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection_test.go
deleted file mode 100644
index 5719b017a..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/connection_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
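The tests that follow pin down SetFromUrl's parsing behaviour; roughly, for a URL of the form scheme://user:pass@host:port one can expect (error handling is illustrative, and a log import is assumed):

    c := elastigo.NewConn()
    if err := c.SetFromUrl("http://someuser:password@localhost:9200"); err != nil {
        log.Fatal(err)
    }
    // c.Protocol == "http", c.Domain == "localhost", c.Port == "9200",
    // c.Username == "someuser", c.Password == "password"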
-
-package elastigo
-
-import (
- "fmt"
- "testing"
-
- "github.com/bmizerany/assert"
-)
-
-func TestSetFromUrl(t *testing.T) {
- c := NewConn()
-
- err := c.SetFromUrl("http://localhost")
- exp := "localhost"
- assert.T(t, c.Domain == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Domain))
-
- c = NewConn()
-
- err = c.SetFromUrl("http://localhost:9200")
- exp = "9200"
- assert.T(t, c.Port == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Port))
-
- c = NewConn()
-
- err = c.SetFromUrl("http://localhost:9200")
- exp = "localhost"
- assert.T(t, c.Domain == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Domain))
-
- c = NewConn()
-
- err = c.SetFromUrl("http://someuser@localhost:9200")
- exp = "someuser"
- assert.T(t, c.Username == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Username))
-
- c = NewConn()
-
- err = c.SetFromUrl("http://someuser:password@localhost:9200")
- exp = "password"
- assert.T(t, c.Password == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Password))
-
- c = NewConn()
-
- err = c.SetFromUrl("http://someuser:password@localhost:9200")
- exp = "someuser"
- assert.T(t, c.Username == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Username))
-
- c = NewConn()
-
- err = c.SetFromUrl("")
- exp = "Url is empty"
- assert.T(t, err != nil && err.Error() == exp, fmt.Sprintf("Expected %s, got: %s", exp, err.Error()))
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk.go
deleted file mode 100644
index 35a194fbb..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk.go
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-)
-
-const (
- // Max buffer size in bytes before flushing to elasticsearch
- BulkMaxBuffer = 16384
- // Max number of Docs to hold in buffer before forcing flush
- BulkMaxDocs = 100
- // Max delay before forcing a flush to Elasticsearch
- BulkDelaySeconds = 5
- // maximum wait shutdown seconds
- MAX_SHUTDOWN_SECS = 5
-)
-
-type ErrorBuffer struct {
- Err error
- Buf *bytes.Buffer
-}
-
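Before the BulkIndexer type itself, a minimal sketch of its intended lifecycle, mirroring the ExampleBulkIndexer_simple function further down (the index and type names are illustrative):

    c := elastigo.NewConn()
    indexer := c.NewBulkIndexer(10) // at most 10 concurrent bulk HTTP requests
    indexer.Start()                 // spins up the sender, doc channel and flush timer
    indexer.Index("twitter", "user", "1", "", "", nil, `{"name":"bob"}`)
    indexer.Stop()                  // flushes remaining docs and waits for sends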
-// A bulk indexer creates goroutines and channels for connecting and sending data
-// to elasticsearch in bulk, using buffers.
-type BulkIndexer struct {
- conn *Conn
-
- // We are creating a variable defining the func responsible for sending
- // to allow a mock sender for test purposes
- Sender func(*bytes.Buffer) error
-
- // The refresh parameter can be set to true in order to refresh the
- // relevant primary and replica shards immediately after the bulk
- // operation has occurred
- Refresh bool
-
- // If we encounter an error in sending, we are going to retry for this long
- // before returning an error
- // if 0 it will not retry
- RetryForSeconds int
-
- // channel for getting errors
- ErrorChannel chan *ErrorBuffer
-
- // channel for sending to background indexer
- bulkChannel chan []byte
-
- // numErrors is a running total of errors seen
- numErrors uint64
-
- // shutdown channel
- shutdownChan chan chan struct{}
- // channel to shutdown timer
- timerDoneChan chan struct{}
-
- // Channel to send a complete byte.Buffer to the http sender
- sendBuf chan *bytes.Buffer
- // byte buffer for docs that have been converted to bytes, but not yet sent
- buf *bytes.Buffer
- // Max amount of time to buffer before forcing a flush
- BufferDelayMax time.Duration
- // Max buffer size in bytes before flushing to elasticsearch
- BulkMaxBuffer int // 1048576
- // Max number of Docs to hold in buffer before forcing flush
- BulkMaxDocs int // 100
-
- // Number of documents we have sent through so far on this session
- docCt int
- // Max number of http conns in flight at one time
- maxConns int
- // If we are indexing enough docs per bufferdelaymax, we won't need to do time
- // based eviction, else we do.
- needsTimeBasedFlush bool
- // Lock for document writes/operations
- mu sync.Mutex
- // Wait Group for the http sends
- sendWg *sync.WaitGroup
-}
-
-func (b *BulkIndexer) NumErrors() uint64 {
- return atomic.LoadUint64(&b.numErrors)
-}
-
-func (c *Conn) NewBulkIndexer(maxConns int) *BulkIndexer {
- b := BulkIndexer{conn: c, sendBuf: make(chan *bytes.Buffer, maxConns)}
- b.needsTimeBasedFlush = true
- b.buf = new(bytes.Buffer)
- b.maxConns = maxConns
- b.BulkMaxBuffer = BulkMaxBuffer
- b.BulkMaxDocs = BulkMaxDocs
- b.BufferDelayMax = time.Duration(BulkDelaySeconds) * time.Second
- b.bulkChannel = make(chan []byte, 100)
- b.sendWg = new(sync.WaitGroup)
- b.timerDoneChan = make(chan struct{})
- return &b
-}
-
-// A bulk indexer with more control over error handling
-// @maxConns is the max number of in flight http requests
-// @retrySeconds is # of seconds to wait before retrying failed requests
-//
-// done := make(chan bool)
-// BulkIndexerGlobalRun(100, done)
-func (c *Conn) NewBulkIndexerErrors(maxConns, retrySeconds int) *BulkIndexer {
- b := c.NewBulkIndexer(maxConns)
- b.RetryForSeconds = retrySeconds
- b.ErrorChannel = make(chan *ErrorBuffer, 20)
- return b
-}
-
-// Start starts this bulk indexer running; it opens a goroutine, so it is
-// non-blocking
-func (b *BulkIndexer) Start() {
- b.shutdownChan = make(chan chan struct{})
-
- go func() {
- // XXX(j): Refactor this stuff to use an interface.
- if b.Sender == nil {
- b.Sender = b.Send
- }
- // Backwards compatibility
- b.startHttpSender()
- b.startDocChannel()
- b.startTimer()
- ch := <-b.shutdownChan
- time.Sleep(2 * time.Millisecond)
- b.Flush()
- b.shutdown()
- ch <- struct{}{}
- }()
-}
-
-// Stop stops the bulk indexer, blocking the caller until it is complete.
-func (b *BulkIndexer) Stop() {
- ch := make(chan struct{})
- b.shutdownChan <- ch
- select {
- case <-ch:
- // done
- case <-time.After(time.Second * time.Duration(MAX_SHUTDOWN_SECS)):
- // timeout!
- }
-}
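Since Sender is an exported field, the tests below (TestBulkDelete, for instance) swap in a capturing function instead of talking to Elasticsearch; the same hook works in application code, as a sketch:

    indexer := c.NewBulkIndexer(1)
    var captured bytes.Buffer
    indexer.Sender = func(buf *bytes.Buffer) error {
        captured.Write(buf.Bytes()) // record the payload rather than sending it
        return nil
    }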
-
-func (b *BulkIndexer) PendingDocuments() int {
- return b.docCt
-}
-
-// Flush all current documents to ElasticSearch
-func (b *BulkIndexer) Flush() {
- b.mu.Lock()
- if b.docCt > 0 {
- b.send(b.buf)
- }
- b.mu.Unlock()
-}
-
-func (b *BulkIndexer) startHttpSender() {
-
- // this sends http requests to elasticsearch. It uses maxConns to open up that
- // many goroutines, each of which will synchronously call ElasticSearch;
- // in theory, the whole set will cause a backup all the way to IndexBulk if
- // we have consumed all maxConns
- for i := 0; i < b.maxConns; i++ {
- b.sendWg.Add(1)
- go func() {
- for buf := range b.sendBuf {
- // Copy for the potential re-send.
- bufCopy := bytes.NewBuffer(buf.Bytes())
- err := b.Sender(buf)
-
- // Perhaps a b.FailureStrategy(err) ?? with different types of strategies
- // 1. Retry, then panic
- // 2. Retry then return error and let runner decide
- // 3. Retry, then log to disk? retry later?
- if err != nil {
- buf = bytes.NewBuffer(bufCopy.Bytes())
- if b.RetryForSeconds > 0 {
- time.Sleep(time.Second * time.Duration(b.RetryForSeconds))
- err = b.Sender(bufCopy)
- if err == nil {
- // Successfully re-sent with no error
- continue
- }
- }
- if b.ErrorChannel != nil {
- b.ErrorChannel <- &ErrorBuffer{err, buf}
- }
- }
- }
- b.sendWg.Done()
- }()
- }
-}
-
-// start a timer for checking back and forcing flush every BulkDelaySeconds seconds
-// even if we haven't hit max messages/size
-func (b *BulkIndexer) startTimer() {
- ticker := time.NewTicker(b.BufferDelayMax)
- go func() {
- for {
- select {
- case <-ticker.C:
- b.mu.Lock()
- // don't send unless the last flush was time-triggered;
- // otherwise it's an indication of other thresholds being hit,
- // where time-based flushing isn't needed
- if b.buf.Len() > 0 && b.needsTimeBasedFlush {
- b.needsTimeBasedFlush = true
- b.send(b.buf)
- } else if b.buf.Len() > 0 {
- b.needsTimeBasedFlush = true
- }
- b.mu.Unlock()
- case <-b.timerDoneChan:
- // shutdown this go routine
- ticker.Stop()
- return
- }
-
- }
- }()
-}
-
-func (b *BulkIndexer) startDocChannel() {
- // This goroutine accepts incoming byte arrays from the IndexBulk function and
- // writes to buffer
- go func() {
- for docBytes := range b.bulkChannel {
- b.mu.Lock()
- b.docCt += 1
- b.buf.Write(docBytes)
- if b.buf.Len() >= b.BulkMaxBuffer || b.docCt >= b.BulkMaxDocs {
- b.needsTimeBasedFlush = false
- //log.Printf("Send due to size: docs=%d bufsize=%d", b.docCt, b.buf.Len())
- b.send(b.buf)
- }
- b.mu.Unlock()
- }
- }()
-}
-
-func (b *BulkIndexer) send(buf *bytes.Buffer) {
- //b2 := *b.buf
- b.sendBuf <- buf
- b.buf = new(bytes.Buffer)
- // b.buf.Reset()
- b.docCt = 0
-}
-
-func (b *BulkIndexer) shutdown() {
- // This must be called after Flush()
- close(b.timerDoneChan)
- close(b.sendBuf)
- close(b.bulkChannel)
- b.sendWg.Wait()
-}
-
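The three flush triggers wired up above (buffer size, document count, elapsed time) are all exported fields, so callers can trade latency against batch size; a sketch with illustrative values:

    indexer := c.NewBulkIndexer(3)
    indexer.BulkMaxBuffer = 1 << 20          // flush at ~1 MiB of buffered payload...
    indexer.BulkMaxDocs = 500                // ...or at 500 queued documents...
    indexer.BufferDelayMax = 2 * time.Second // ...or after 2 seconds without a flush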
-// The index bulk API adds or updates a typed JSON document to a specific index, making it searchable.
-// It operates by buffering requests and occasionally flushing them to elasticsearch.
-// http://www.elasticsearch.org/guide/reference/api/bulk.html
-func (b *BulkIndexer) Index(index string, _type string, id, parent, ttl string, date *time.Time, data interface{}) error {
- //{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
- by, err := WriteBulkBytes("index", index, _type, id, parent, ttl, date, data)
- if err != nil {
- return err
- }
- b.bulkChannel <- by
- return nil
-}
-
-func (b *BulkIndexer) Update(index string, _type string, id, parent, ttl string, date *time.Time, data interface{}) error {
- //{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
- by, err := WriteBulkBytes("update", index, _type, id, parent, ttl, date, data)
- if err != nil {
- return err
- }
- b.bulkChannel <- by
- return nil
-}
-
-func (b *BulkIndexer) Delete(index, _type, id string) {
- queryLine := fmt.Sprintf("{\"delete\":{\"_index\":%q,\"_type\":%q,\"_id\":%q}}\n", index, _type, id)
- b.bulkChannel <- []byte(queryLine)
- return
-}
-
-func (b *BulkIndexer) UpdateWithWithScript(index string, _type string, id, parent, ttl string, date *time.Time, script string) error {
-
- var data map[string]interface{} = make(map[string]interface{})
- data["script"] = script
- return b.Update(index, _type, id, parent, ttl, date, data)
-}
-
-func (b *BulkIndexer) UpdateWithPartialDoc(index string, _type string, id, parent, ttl string, date *time.Time, partialDoc interface{}, upsert bool) error {
-
- var data map[string]interface{} = make(map[string]interface{})
-
- data["doc"] = partialDoc
- if upsert {
- data["doc_as_upsert"] = true
- }
- return b.Update(index, _type, id, parent, ttl, date, data)
-}
-
-// This does the actual send of a buffer, which has already been formatted
-// into bytes of ES formatted bulk data
-func (b *BulkIndexer) Send(buf *bytes.Buffer) error {
- type responseStruct struct {
- Took int64 `json:"took"`
- Errors bool `json:"errors"`
- Items []map[string]interface{} `json:"items"`
- }
-
- response := responseStruct{}
-
- body, err := b.conn.DoCommand("POST", fmt.Sprintf("/_bulk?refresh=%t", b.Refresh), nil, buf)
-
- if err != nil {
- atomic.AddUint64(&b.numErrors, 1)
- return err
- }
- // check for response errors, bulk insert will give 200 OK but then include errors in response
- jsonErr := json.Unmarshal(body, &response)
- if jsonErr == nil {
- if response.Errors {
- atomic.AddUint64(&b.numErrors, uint64(len(response.Items)))
- return fmt.Errorf("Bulk Insertion Error. Failed item count [%d]", len(response.Items))
- }
- }
- return nil
-}
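When the indexer is constructed with NewBulkIndexerErrors, buffers that still fail after the retry window surface on ErrorChannel; a minimal consumer sketch (the log call is illustrative):

    indexer := c.NewBulkIndexerErrors(10, 60) // 10 connections, retry failures after 60s
    indexer.Start()
    go func() {
        for errBuf := range indexer.ErrorChannel {
            log.Printf("bulk send failed: %v (%d bytes)", errBuf.Err, errBuf.Buf.Len())
        }
    }()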
Failed item count [%d]", len(response.Items)) - } - } - return nil -} - -// Given a set of arguments for index, type, id, data create a set of bytes that is formatted for bulkd index -// http://www.elasticsearch.org/guide/reference/api/bulk.html -func WriteBulkBytes(op string, index string, _type string, id, parent, ttl string, date *time.Time, data interface{}) ([]byte, error) { - // only index and update are currently supported - if op != "index" && op != "update" { - return nil, errors.New(fmt.Sprintf("Operation '%s' is not yet supported", op)) - } - - // First line - buf := bytes.Buffer{} - buf.WriteString(fmt.Sprintf(`{"%s":{"_index":"`, op)) - buf.WriteString(index) - buf.WriteString(`","_type":"`) - buf.WriteString(_type) - buf.WriteString(`"`) - if len(id) > 0 { - buf.WriteString(`,"_id":"`) - buf.WriteString(id) - buf.WriteString(`"`) - } - - if len(parent) > 0 { - buf.WriteString(`,"_parent":"`) - buf.WriteString(parent) - buf.WriteString(`"`) - } - - if op == "update" { - buf.WriteString(`,"_retry_on_conflict":3`) - } - - if len(ttl) > 0 { - buf.WriteString(`,"ttl":"`) - buf.WriteString(ttl) - buf.WriteString(`"`) - } - if date != nil { - buf.WriteString(`,"_timestamp":"`) - buf.WriteString(strconv.FormatInt(date.UnixNano()/1e6, 10)) - buf.WriteString(`"`) - } - - buf.WriteString(`}}`) - buf.WriteRune('\n') - //buf.WriteByte('\n') - switch v := data.(type) { - case *bytes.Buffer: - io.Copy(&buf, v) - case []byte: - buf.Write(v) - case string: - buf.WriteString(v) - default: - body, jsonErr := json.Marshal(data) - if jsonErr != nil { - return nil, jsonErr - } - buf.Write(body) - } - buf.WriteRune('\n') - return buf.Bytes(), nil -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk_test.go deleted file mode 100644 index e352ae338..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulk_test.go +++ /dev/null @@ -1,399 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package elastigo - -import ( - "bytes" - "crypto/rand" - "encoding/json" - "flag" - "fmt" - "log" - "net/url" - "strconv" - "sync" - "testing" - "time" - - "github.com/araddon/gou" - "github.com/bmizerany/assert" -) - -// go test -bench=".*" -// go test -bench="Bulk" - -type sharedBuffer struct { - mu sync.Mutex - Buffer []*bytes.Buffer -} - -func NewSharedBuffer() *sharedBuffer { - return &sharedBuffer{ - Buffer: make([]*bytes.Buffer, 0), - } -} - -func (b *sharedBuffer) Append(buf *bytes.Buffer) { - b.mu.Lock() - defer b.mu.Unlock() - b.Buffer = append(b.Buffer, buf) -} - -func (b *sharedBuffer) Length() int { - b.mu.Lock() - defer b.mu.Unlock() - return len(b.Buffer) -} - -func init() { - flag.Parse() - if testing.Verbose() { - gou.SetupLogging("debug") - } -} - -// take two ints, compare, need to be within 5% -func closeInt(a, b int) bool { - c := float64(a) / float64(b) - if c >= .95 && c <= 1.05 { - return true - } - return false -} - -func TestBulkIndexerBasic(t *testing.T) { - testIndex := "users" - var ( - buffers = NewSharedBuffer() - totalBytesSent int - messageSets int - ) - - InitTests(true) - c := NewTestConn() - - c.DeleteIndex(testIndex) - - indexer := c.NewBulkIndexer(3) - indexer.Sender = func(buf *bytes.Buffer) error { - messageSets += 1 - totalBytesSent += buf.Len() - buffers.Append(buf) - //log.Printf("buffer:%s", string(buf.Bytes())) - return indexer.Send(buf) - } - indexer.Start() - - date := time.Unix(1257894000, 0) - data := map[string]interface{}{ - "name": "smurfs", - "age": 22, - "date": "yesterday", - } - - err := indexer.Index(testIndex, "user", "1", "", "", &date, data) - waitFor(func() bool { - return buffers.Length() > 0 - }, 5) - - // part of request is url, so lets factor that in - //totalBytesSent = totalBytesSent - len(*eshost) - assert.T(t, buffers.Length() == 1, fmt.Sprintf("Should have sent one operation but was %d", buffers.Length())) - assert.T(t, indexer.NumErrors() == 0 && err == nil, fmt.Sprintf("Should not have any errors. 
NumErrors: %v, err: %v", indexer.NumErrors(), err))
- expectedBytes := 129
- assert.T(t, totalBytesSent == expectedBytes, fmt.Sprintf("Should have sent %v bytes but was %v", expectedBytes, totalBytesSent))
-
- err = indexer.Index(testIndex, "user", "2", "", "", nil, data)
- waitFor(func() bool {
- return buffers.Length() > 1
- }, 5)
-
- // this will test to ensure that Flush actually catches a doc
- indexer.Flush()
- totalBytesSent = totalBytesSent - len(*eshost)
- assert.T(t, err == nil, fmt.Sprintf("Should have nil error =%v", err))
- assert.T(t, buffers.Length() == 2, fmt.Sprintf("Should have another buffer ct=%d", buffers.Length()))
-
- assert.T(t, indexer.NumErrors() == 0, fmt.Sprintf("Should not have any errors %d", indexer.NumErrors()))
- expectedBytes = 220
- assert.T(t, closeInt(totalBytesSent, expectedBytes), fmt.Sprintf("Should have sent %v bytes but was %v", expectedBytes, totalBytesSent))
-
- indexer.Stop()
-}
-
-func TestRefreshParam(t *testing.T) {
- requrlChan := make(chan *url.URL, 1)
- InitTests(true)
- c := NewTestConn()
- c.RequestTracer = func(method, urlStr, body string) {
- requrl, _ := url.Parse(urlStr)
- requrlChan <- requrl
- }
- date := time.Unix(1257894000, 0)
- data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date}
-
- // Now tests small batches
- indexer := c.NewBulkIndexer(1)
- indexer.Refresh = true
-
- indexer.Start()
- <-time.After(time.Millisecond * 20)
-
- indexer.Index("users", "user", "2", "", "", &date, data)
-
- <-time.After(time.Millisecond * 200)
- // indexer.Flush()
- indexer.Stop()
- requrl := <-requrlChan
- assert.T(t, requrl.Query().Get("refresh") == "true", "Should have set refresh query param to true")
-}
-
-func TestWithoutRefreshParam(t *testing.T) {
- requrlChan := make(chan *url.URL, 1)
- InitTests(true)
- c := NewTestConn()
- c.RequestTracer = func(method, urlStr, body string) {
- requrl, _ := url.Parse(urlStr)
- requrlChan <- requrl
- }
- date := time.Unix(1257894000, 0)
- data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date}
-
- // Now tests small batches
- indexer := c.NewBulkIndexer(1)
-
- indexer.Start()
- <-time.After(time.Millisecond * 20)
-
- indexer.Index("users", "user", "2", "", "", &date, data)
-
- <-time.After(time.Millisecond * 200)
- // indexer.Flush()
- indexer.Stop()
- requrl := <-requrlChan
- assert.T(t, requrl.Query().Get("refresh") == "false", "Should have set refresh query param to false")
-}
-
-// currently broken in drone.io
-func XXXTestBulkUpdate(t *testing.T) {
- var (
- buffers = NewSharedBuffer()
- totalBytesSent int
- messageSets int
- )
-
- InitTests(true)
- c := NewTestConn()
- c.Port = "9200"
- indexer := c.NewBulkIndexer(3)
- indexer.Sender = func(buf *bytes.Buffer) error {
- messageSets += 1
- totalBytesSent += buf.Len()
- buffers.Append(buf)
- return indexer.Send(buf)
- }
- indexer.Start()
-
- date := time.Unix(1257894000, 0)
- user := map[string]interface{}{
- "name": "smurfs", "age": 22, "date": date, "count": 1,
- }
-
- // Let's make sure the data is in the index ...
- _, err := c.Index("users", "user", "5", nil, user)
-
- // script and params
- data := map[string]interface{}{
- "script": "ctx._source.count += 2",
- }
- err = indexer.Update("users", "user", "5", "", "", &date, data)
- // So here's the deal. Flushing does seem to work, you just have to give the
- // channel a moment to receive the message ...
- // <- time.After(time.Millisecond * 20) - // indexer.Flush() - - waitFor(func() bool { - return buffers.Length() > 0 - }, 5) - - indexer.Stop() - - assert.T(t, indexer.NumErrors() == 0 && err == nil, fmt.Sprintf("Should not have any errors, bulkErrorCt:%v, err:%v", indexer.NumErrors(), err)) - - response, err := c.Get("users", "user", "5", nil) - assert.T(t, err == nil, fmt.Sprintf("Should not have any errors %v", err)) - m := make(map[string]interface{}) - json.Unmarshal([]byte(*response.Source), &m) - newCount := m["count"] - assert.T(t, newCount.(float64) == 3, - fmt.Sprintf("Should have update count: %#v ... %#v", m["count"], response)) -} - -func TestBulkSmallBatch(t *testing.T) { - var ( - messageSets int - ) - - InitTests(true) - c := NewTestConn() - - date := time.Unix(1257894000, 0) - data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} - - // Now tests small batches - indexer := c.NewBulkIndexer(1) - indexer.BufferDelayMax = 100 * time.Millisecond - indexer.BulkMaxDocs = 2 - messageSets = 0 - indexer.Sender = func(buf *bytes.Buffer) error { - messageSets += 1 - return indexer.Send(buf) - } - indexer.Start() - <-time.After(time.Millisecond * 20) - - indexer.Index("users", "user", "2", "", "", &date, data) - indexer.Index("users", "user", "3", "", "", &date, data) - indexer.Index("users", "user", "4", "", "", &date, data) - <-time.After(time.Millisecond * 200) - // indexer.Flush() - indexer.Stop() - assert.T(t, messageSets == 2, fmt.Sprintf("Should have sent 2 message sets %d", messageSets)) - -} - -func TestBulkDelete(t *testing.T) { - InitTests(true) - var lock sync.Mutex - c := NewTestConn() - indexer := c.NewBulkIndexer(1) - sentBytes := []byte{} - - indexer.Sender = func(buf *bytes.Buffer) error { - lock.Lock() - sentBytes = append(sentBytes, buf.Bytes()...) - lock.Unlock() - return nil - } - - indexer.Start() - - indexer.Delete("fake", "fake_type", "1") - - indexer.Flush() - indexer.Stop() - - lock.Lock() - sent := string(sentBytes) - lock.Unlock() - - expected := `{"delete":{"_index":"fake","_type":"fake_type","_id":"1"}} -` - asExpected := sent == expected - assert.T(t, asExpected, fmt.Sprintf("Should have sent '%s' but actually sent '%s'", expected, sent)) -} - -func XXXTestBulkErrors(t *testing.T) { - // lets set a bad port, and hope we get a conn refused error? 
- c := NewTestConn() - c.Port = "27845" - defer func() { - c.Port = "9200" - }() - indexer := c.NewBulkIndexerErrors(10, 1) - indexer.Start() - errorCt := 0 - go func() { - for i := 0; i < 20; i++ { - date := time.Unix(1257894000, 0) - data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} - indexer.Index("users", "user", strconv.Itoa(i), "", "", &date, data) - } - }() - var errBuf *ErrorBuffer - for errBuf = range indexer.ErrorChannel { - errorCt++ - break - } - if errBuf.Buf.Len() > 0 { - gou.Debug(errBuf.Err) - } - assert.T(t, errorCt > 0, fmt.Sprintf("ErrorCt should be > 0 %d", errorCt)) - indexer.Stop() -} - -/* -BenchmarkSend 18:33:00 bulk_test.go:131: Sent 1 messages in 0 sets totaling 0 bytes -18:33:00 bulk_test.go:131: Sent 100 messages in 1 sets totaling 145889 bytes -18:33:01 bulk_test.go:131: Sent 10000 messages in 100 sets totaling 14608888 bytes -18:33:05 bulk_test.go:131: Sent 20000 messages in 99 sets totaling 14462790 bytes - 20000 234526 ns/op - -*/ -func BenchmarkSend(b *testing.B) { - InitTests(true) - c := NewTestConn() - b.StartTimer() - totalBytes := 0 - sets := 0 - indexer := c.NewBulkIndexer(1) - indexer.Sender = func(buf *bytes.Buffer) error { - totalBytes += buf.Len() - sets += 1 - //log.Println("got bulk") - return indexer.Send(buf) - } - for i := 0; i < b.N; i++ { - about := make([]byte, 1000) - rand.Read(about) - data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "about": about} - indexer.Index("users", "user", strconv.Itoa(i), "", "", nil, data) - } - log.Printf("Sent %d messages in %d sets totaling %d bytes \n", b.N, sets, totalBytes) - if indexer.NumErrors() != 0 { - b.Fail() - } -} - -/* -TODO: this should be faster than above - -BenchmarkSendBytes 18:33:05 bulk_test.go:169: Sent 1 messages in 0 sets totaling 0 bytes -18:33:05 bulk_test.go:169: Sent 100 messages in 2 sets totaling 292299 bytes -18:33:09 bulk_test.go:169: Sent 10000 messages in 99 sets totaling 14473800 bytes - 10000 373529 ns/op - -*/ -func BenchmarkSendBytes(b *testing.B) { - InitTests(true) - c := NewTestConn() - about := make([]byte, 1000) - rand.Read(about) - data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "about": about} - body, _ := json.Marshal(data) - b.StartTimer() - totalBytes := 0 - sets := 0 - indexer := c.NewBulkIndexer(1) - indexer.Sender = func(buf *bytes.Buffer) error { - totalBytes += buf.Len() - sets += 1 - return indexer.Send(buf) - } - for i := 0; i < b.N; i++ { - indexer.Index("users", "user", strconv.Itoa(i), "", "", nil, body) - } - log.Printf("Sent %d messages in %d sets totaling %d bytes \n", b.N, sets, totalBytes) - if indexer.NumErrors() != 0 { - b.Fail() - } -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulkudp.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulkudp.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corebulkudp.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corecount.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corecount.go deleted file mode 100644 index 279376d43..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corecount.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" -) - -type CountResponse struct { - Count int `json:"count"` - Shard Status `json:"_shards"` -} - -// Count allows the caller to easily execute a query and get the number of matches for that query. -// It can be executed across one or more indices and across one or more types. -// The query can either be provided using a simple query string as a parameter, -// or using the Query DSL defined within the request body. -// http://www.elasticsearch.org/guide/reference/api/count.html -func (c *Conn) Count(index string, _type string, args map[string]interface{}, query interface{}) (CountResponse, error) { - var url string - var retval CountResponse - url = fmt.Sprintf("/%s/%s/_count", index, _type) - body, err := c.DoCommand("GET", url, args, query) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredelete.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredelete.go deleted file mode 100644 index e8af69d0c..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredelete.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" -) - -// Delete API allows to delete a typed JSON document from a specific index based on its id. 
-// http://www.elasticsearch.org/guide/reference/api/delete.html -func (c *Conn) Delete(index string, _type string, id string, args map[string]interface{}) (BaseResponse, error) { - var url string - var retval BaseResponse - url = fmt.Sprintf("/%s/%s/%s", index, _type, id) - body, err := c.DoCommand("DELETE", url, args, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredeletebyquery.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredeletebyquery.go deleted file mode 100644 index 7d99adf4b..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coredeletebyquery.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - "strings" -) - -// DeleteByQuery allows the caller to delete documents from one or more indices and one or more types based on a query. -// The query can either be provided using a simple query string as a parameter, or using the Query DSL defined within -// the request body. -// see: http://www.elasticsearch.org/guide/reference/api/delete-by-query.html -func (c *Conn) DeleteByQuery(indices []string, types []string, args map[string]interface{}, query interface{}) (BaseResponse, error) { - var url string - var retval BaseResponse - if len(indices) > 0 && len(types) > 0 { - url = fmt.Sprintf("/%s/%s/_query", strings.Join(indices, ","), strings.Join(types, ",")) - } else if len(indices) > 0 { - url = fmt.Sprintf("/%s/_query", strings.Join(indices, ",")) - } - body, err := c.DoCommand("DELETE", url, args, query) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -func buildQuery() string { - return "" -} - -type DeleteByQueryResponse struct { - Status bool `json:"ok"` - Indicies map[string]IndexStatus `json:"_indices"` -} - -type IndexStatus struct { - Shards Status `json:"_shards"` -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexample_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexample_test.go deleted file mode 100644 index fed31dfee..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexample_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo_test
-
-import (
- "bytes"
- "fmt"
- "strconv"
-
- elastigo "github.com/mattbaird/elastigo/lib"
-)
-
-// The simplest usage of background bulk indexing
-func ExampleBulkIndexer_simple() {
- c := elastigo.NewConn()
-
- indexer := c.NewBulkIndexerErrors(10, 60)
- indexer.Start()
- indexer.Index("twitter", "user", "1", "", "", nil, `{"name":"bob"}`)
- indexer.Stop()
-}
-
-// Inspecting the response
-func ExampleBulkIndexer_responses() {
- c := elastigo.NewConn()
-
- indexer := c.NewBulkIndexer(10)
- // Create a custom Sender Func, to allow inspection of response/error
- indexer.Sender = func(buf *bytes.Buffer) error {
- // @buf is the buffer of docs about to be written
- respJson, err := c.DoCommand("POST", "/_bulk", nil, buf)
- if err != nil {
- // handle it better than this
- fmt.Println(string(respJson))
- }
- return err
- }
- indexer.Start()
- for i := 0; i < 20; i++ {
- indexer.Index("twitter", "user", strconv.Itoa(i), "", "", nil, `{"name":"bob"}`)
- }
- indexer.Stop()
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexplain.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexplain.go
deleted file mode 100644
index 82ec69556..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreexplain.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// Explain computes a score explanation for a query and a specific document.
-// This can give useful feedback on whether a document matches or didn’t match a specific query.
-// This feature is available from version 0.19.9 and up.
-// see http://www.elasticsearch.org/guide/reference/api/explain.html -func (c *Conn) Explain(index string, _type string, id string, args map[string]interface{}, query string) (Match, error) { - var url string - var retval Match - if len(_type) > 0 { - url = fmt.Sprintf("/%s/%s/_explain", index, _type) - } else { - url = fmt.Sprintf("/%s/_explain", index) - } - body, err := c.DoCommand("GET", url, args, query) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreget.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreget.go deleted file mode 100644 index b0d699582..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreget.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - "net/http" -) - -// Get allows caller to get a typed JSON document from the index based on its id. -// GET - retrieves the doc -// HEAD - checks for existence of the doc -// http://www.elasticsearch.org/guide/reference/api/get.html -// TODO: make this implement an interface -func (c *Conn) get(index string, _type string, id string, args map[string]interface{}, source *json.RawMessage) (BaseResponse, error) { - var url string - retval := BaseResponse{Source: source} - if len(_type) > 0 { - url = fmt.Sprintf("/%s/%s/%s", index, _type, id) - } else { - url = fmt.Sprintf("/%s/%s", index, id) - } - body, err := c.DoCommand("GET", url, args, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -// The get API allows to get a typed JSON document from the index based on its id. -// GET - retrieves the doc -// HEAD - checks for existence of the doc -// http://www.elasticsearch.org/guide/reference/api/get.html -// TODO: make this implement an interface -func (c *Conn) Get(index string, _type string, id string, args map[string]interface{}) (BaseResponse, error) { - return c.get(index, _type, id, args, nil) -} - -// Same as Get but with custom source type. 
-func (c *Conn) GetCustom(index string, _type string, id string, args map[string]interface{}, source *json.RawMessage) (BaseResponse, error) { - return c.get(index, _type, id, args, source) -} - -// GetSource retrieves the document by id and converts it to provided interface -func (c *Conn) GetSource(index string, _type string, id string, args map[string]interface{}, source interface{}) error { - url := fmt.Sprintf("/%s/%s/%s/_source", index, _type, id) - body, err := c.DoCommand("GET", url, args, nil) - if err == nil { - err = json.Unmarshal(body, &source) - } - return err -} - -// ExistsBool allows caller to check for the existence of a document using HEAD -// TODO(shutej): This looks redundant with the Exists function in -// baserequest.go, check with mattbaird@. -func (c *Conn) ExistsBool(index string, _type string, id string, args map[string]interface{}) (bool, error) { - - var url string - - query, err := Escape(args) - if err != nil { - return false, err - } - - if len(_type) > 0 { - url = fmt.Sprintf("/%s/%s/%s", index, _type, id) - } else { - url = fmt.Sprintf("/%s/%s", index, id) - } - - req, err := c.NewRequest("HEAD", url, query) - if err != nil { - return false, err - } - - httpStatusCode, _, err := req.Do(nil) - - // RecordNotFound is the expected response for a non-existent document, - // so we don't return an error to our caller - if err == RecordNotFound { - return false, nil - } - - return httpStatusCode == http.StatusOK, err -} - -// ExistsIndex allows caller to check for the existence of an index or a type using HEAD -func (c *Conn) ExistsIndex(index string, _type string, args map[string]interface{}) (bool, error) { - var url string - - query, err := Escape(args) - if err != nil { - return false, err - } - - if len(_type) > 0 { - url = fmt.Sprintf("/%s/%s", index, _type) - } else { - url = fmt.Sprintf("/%s", index) - } - req, err := c.NewRequest("HEAD", url, query) - httpStatusCode, _, err := req.Do(nil) - - if err != nil { - return false, err - } - if httpStatusCode == http.StatusOK { - return true, err - } - return false, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreindex.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreindex.go deleted file mode 100644 index 7ba5031e0..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreindex.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "errors" - "fmt" - "net/url" - "strconv" -) - -// Index adds or updates a typed JSON document in a specific index, making it searchable, creating an index -// if it did not exist. 
-// if id is omitted, op_type 'create' will be passed and the http method will default to "POST"
-// _type is optional
-// id is optional
-// parentId is optional
-// version is optional
-// op_type is optional
-// routing is optional
-// timestamp is optional
-// ttl is optional
-// percolate is optional
-// timeout is optional
-// http://www.elasticsearch.org/guide/reference/api/index_.html
-func (c *Conn) Index(index string, _type string, id string, args map[string]interface{}, data interface{}) (BaseResponse, error) {
- return c.IndexWithParameters(index, _type, id, "", 0, "", "", "", 0, "", "", false, args, data)
-}
-
-// IndexWithParameters takes all the potential parameters available
-func (c *Conn) IndexWithParameters(index string, _type string, id string, parentId string, version int, op_type string,
- routing string, timestamp string, ttl int, percolate string, timeout string, refresh bool,
- args map[string]interface{}, data interface{}) (BaseResponse, error) {
- var url string
- var retval BaseResponse
- url, err := GetIndexUrl(index, _type, id, parentId, version, op_type, routing, timestamp, ttl, percolate, timeout, refresh)
- if err != nil {
- return retval, err
- }
- var method string
- if len(id) == 0 {
- method = "POST"
- } else {
- method = "PUT"
- }
- body, err := c.DoCommand(method, url, args, data)
- if err != nil {
- return retval, err
- }
- if err == nil {
- // marshall into json
- jsonErr := json.Unmarshal(body, &retval)
- if jsonErr != nil {
- return retval, jsonErr
- }
- }
- return retval, err
-}
-
-func GetIndexUrl(index string, _type string, id string, parentId string, version int, op_type string,
- routing string, timestamp string, ttl int, percolate string, timeout string, refresh bool) (retval string, e error) {
-
- if len(index) == 0 {
- return "", errors.New("index can not be blank")
- }
- var partialURL string
- var values url.Values = url.Values{}
- if len(_type) == 0 && len(id) > 0 {
- e = errors.New("Can't specify id when _type is blank")
- return
- }
- if len(_type) > 0 && len(id) > 0 {
- partialURL = fmt.Sprintf("/%s/%s/%s", index, _type, id)
- } else if len(_type) > 0 {
- partialURL = fmt.Sprintf("/%s/%s", index, _type)
- } else {
- partialURL = fmt.Sprintf("/%s", index)
- }
- // A child document can be indexed by specifying its parent when indexing.
- if len(parentId) > 0 {
- values.Add("parent", parentId)
- }
- // versions start at 1, so if greater than 0
- if version > 0 {
- values.Add("version", strconv.Itoa(version))
- }
- if len(op_type) > 0 {
- if len(id) == 0 {
- //if id is omitted, op_type defaults to 'create'
- values.Add("op_type", "create")
- } else {
- values.Add("op_type", op_type)
- }
- }
- if len(routing) > 0 {
- values.Add("routing", routing)
- }
- // A document can be indexed with a timestamp associated with it.
- // The timestamp value of a document can be set using the timestamp parameter.
- if len(timestamp) > 0 {
- values.Add("timestamp", timestamp)
- }
- // A document can be indexed with a ttl (time to live) associated with it. Expired documents
- // will be expunged automatically.
- if ttl > 0 {
- values.Add("ttl", strconv.Itoa(ttl))
- }
- if len(percolate) > 0 {
- values.Add("percolate", percolate)
- }
- // example 5m
- if len(timeout) > 0 {
- values.Add("timeout", timeout)
- }
-
- if refresh {
- values.Add("refresh", "true")
- }
-
- partialURL += "?" 
+ values.Encode() - return partialURL, nil -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremget.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremget.go deleted file mode 100644 index 1fece7fc3..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremget.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" -) - -// MGet allows the caller to get multiple documents based on an index, type (optional) and id (and possibly routing). -// The response includes a docs array with all the fetched documents, each element similar in structure to a document -// provided by the get API. -// see http://www.elasticsearch.org/guide/reference/api/multi-get.html -func (c *Conn) MGet(index string, _type string, mgetRequest MGetRequestContainer, args map[string]interface{}) (MGetResponseContainer, error) { - var url string - var retval MGetResponseContainer - if len(index) <= 0 { - url = fmt.Sprintf("/_mget") - } - if len(_type) > 0 && len(index) > 0 { - url = fmt.Sprintf("/%s/%s/_mget", index, _type) - } else if len(index) > 0 { - url = fmt.Sprintf("/%s/_mget", index) - } - body, err := c.DoCommand("GET", url, args, mgetRequest) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -type MGetRequestContainer struct { - Docs []MGetRequest `json:"docs"` -} - -type MGetRequest struct { - Index string `json:"_index"` - Type string `json:"_type"` - ID string `json:"_id"` - IDS []string `json:"_ids,omitempty"` - Fields []string `json:"fields,omitempty"` -} - -type MGetResponseContainer struct { - Docs []BaseResponse `json:"docs"` -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremorelikethis.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremorelikethis.go deleted file mode 100644 index 0c2fc4c36..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremorelikethis.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" -) - -// MoreLikeThis allows the caller to get documents that are “like” a specified document. 
-// http://www.elasticsearch.org/guide/reference/api/more-like-this.html -func (c *Conn) MoreLikeThis(index string, _type string, id string, args map[string]interface{}, query MoreLikeThisQuery) (BaseResponse, error) { - var url string - var retval BaseResponse - url = fmt.Sprintf("/%s/%s/%s/_mlt", index, _type, id) - body, err := c.DoCommand("GET", url, args, query) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -type MoreLikeThisQuery struct { - MoreLikeThis MLT `json:"more_like_this"` -} - -type MLT struct { - Fields []string `json:"fields"` - LikeText string `json:"like_text"` - PercentTermsToMatch float32 `json:"percent_terms_to_match"` - MinTermFrequency int `json:"min_term_freq"` - MaxQueryTerms int `json:"max_query_terms"` - StopWords []string `json:"stop_words"` - MinDocFrequency int `json:"min_doc_freq"` - MaxDocFrequency int `json:"max_doc_freq"` - MinWordLength int `json:"min_word_len"` - MaxWordLength int `json:"max_word_len"` - BoostTerms int `json:"boost_terms"` - Boost float32 `json:"boost"` - Analyzer string `json:"analyzer"` -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremsearch.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremsearch.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coremsearch.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate.go deleted file mode 100644 index cc38bfa21..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package elastigo - -import ( - "encoding/json" - "fmt" -) - -type PercolatorResult struct { - SearchResult - Matches []PercolatorMatch `json:"matches"` -} - -type PercolatorMatch struct { - Index string `json:"_index"` - Id string `json:"_id"` -} - -// See http://www.elasticsearch.org/guide/reference/api/percolate.html -func (c *Conn) RegisterPercolate(index string, id string, data interface{}) (BaseResponse, error) { - var url string - var retval BaseResponse - url = fmt.Sprintf("/%s/.percolator/%s", index, id) - body, err := c.DoCommand("PUT", url, nil, data) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -func (c *Conn) Percolate(index string, _type string, name string, args map[string]interface{}, doc string) (PercolatorResult, error) { - var url string - var retval PercolatorResult - url = fmt.Sprintf("/%s/%s/_percolate", index, _type) - body, err := c.DoCommand("GET", url, args, doc) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate_test.go deleted file mode 100644 index 55a0713a1..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corepercolate_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package elastigo - -import ( - . "github.com/smartystreets/goconvey/convey" - "testing" -) - -const ( - percIndexName = "test-perc-index" -) - -func TestPercolate(t *testing.T) { - Convey("With a registered percolator", t, func() { - c := NewTestConn() - _, createErr := c.CreateIndex(percIndexName) - So(createErr, ShouldBeNil) - defer c.DeleteIndex(percIndexName) - - options := `{ - "percType": { - "properties": { - "message": { - "type": "string" - } - } - } - }` - - err := c.PutMappingFromJSON(percIndexName, "percType", []byte(options)) - So(err, ShouldBeNil) - - data := `{ - "query": { - "match": { - "message": "bonsai tree" - } - } - }` - - _, err = c.RegisterPercolate(percIndexName, "PERCID", data) - So(err, ShouldBeNil) - - Convey("That matches the document", func() { - // Should return the percolator id (registered query) - doc := `{"doc": { "message": "A new bonsai tree in the office" }}` - - result, err := c.Percolate(percIndexName, "percType", "", nil, doc) - So(err, ShouldBeNil) - So(len(result.Matches), ShouldEqual, 1) - match := result.Matches[0] - So(match.Id, ShouldEqual, "PERCID") - So(match.Index, ShouldEqual, percIndexName) - }) - - Convey("That does not match the document", func() { - // Should NOT return the percolator id (registered query) - doc := `{"doc": { "message": "Barren wasteland with no matches" }}` - - result, err := c.Percolate(percIndexName, "percType", "", nil, doc) - So(err, ShouldBeNil) - So(len(result.Matches), ShouldEqual, 0) - }) - }) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch.go deleted file mode 100644 index 0d83c9c90..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, 
Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -// Search performs a very basic search on an index via the request URI API. -// -// params: -// @index: the elasticsearch index -// @_type: optional ("" if not used) search specific type in this index -// @args: a map of URL parameters. Allows all the URI-request parameters allowed by ElasticSearch. -// @query: this can be one of 3 types: -// 1) string value that is valid elasticsearch -// 2) io.Reader that can be set in body (also valid elasticsearch string syntax..) -// 3) other type marshalable to json (also valid elasticsearch json) -// -// out, err := Search(true, "github", map[string]interface{} {"from" : 10}, qryType) -// -// http://www.elasticsearch.org/guide/reference/api/search/uri-request.html -func (c *Conn) Search(index string, _type string, args map[string]interface{}, query interface{}) (SearchResult, error) { - var uriVal string - var retval SearchResult - if len(_type) > 0 && _type != "*" { - uriVal = fmt.Sprintf("/%s/%s/_search", index, _type) - } else { - uriVal = fmt.Sprintf("/%s/_search", index) - } - body, err := c.DoCommand("POST", uriVal, args, query) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal([]byte(body), &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - retval.RawJSON = body - return retval, err -} - -func (c *Conn) Suggest(index string, args map[string]interface{}, query interface{}) (SuggestResults, error) { - uriVal := fmt.Sprintf("/%s/_suggest", index) - body, err := c.DoCommand("POST", uriVal, args, query) - var retval SuggestResults - if err != nil { - return retval, err - } - jsonErr := json.Unmarshal([]byte(body), &retval.body) - if jsonErr != nil { - return retval, jsonErr - } - shards := retval.body["_shards"] - if shards == nil { - return retval, fmt.Errorf("Expect response to contain _shards field, got: %s", body) - } - jsonErr = json.Unmarshal(shards, &retval.ShardStatus) - if jsonErr != nil { - return retval, jsonErr - } - if len(retval.ShardStatus.Failures) > 0 { - return retval, fmt.Errorf("Got the following errors:\n%s", failures(retval.ShardStatus.Failures)) - } - return retval, nil -} - -type SuggestResults struct { - body map[string]json.RawMessage - ShardStatus Status -} - -func (s SuggestResults) Result(suggestName string) ([]Suggestion, error) { - var suggestions []Suggestion - query := s.body[suggestName] - if query == nil { - return nil, fmt.Errorf("No such suggest name found") - } - err := json.Unmarshal(query, &suggestions) - if err != nil { - return nil, err - } - return suggestions, nil -} - -// SearchUri performs the simplest possible query in url string -// params: -// @index: the elasticsearch index -// @_type: optional ("" if not used) search specific type in this index -// @args: a map of URL parameters. 
Most important one is q -// -// out, err := SearchUri("github","", map[string]interface{} { "q" : `user:kimchy`}) -// -// produces a request like this: host:9200/github/_search?q=user:kimchy" -// -// http://www.elasticsearch.org/guide/reference/api/search/uri-request.html -func (c *Conn) SearchUri(index, _type string, args map[string]interface{}) (SearchResult, error) { - var uriVal string - var retval SearchResult - if len(_type) > 0 && _type != "*" { - uriVal = fmt.Sprintf("/%s/%s/_search", index, _type) - } else { - uriVal = fmt.Sprintf("/%s/_search", index) - } - //log.Println(uriVal) - body, err := c.DoCommand("GET", uriVal, args, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal([]byte(body), &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - retval.RawJSON = body - return retval, err -} - -func (c *Conn) Scroll(args map[string]interface{}, scroll_id string) (SearchResult, error) { - var url string - var retval SearchResult - - if _, ok := args["scroll"]; !ok { - return retval, fmt.Errorf("Cannot call scroll without 'scroll' in arguments") - } - - url = "/_search/scroll" - - body, err := c.DoCommand("POST", url, args, scroll_id) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal([]byte(body), &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -type SuggestionOption struct { - Payload json.RawMessage `json:"payload"` - Score Float32Nullable `json:"score,omitempty"` - Text string `json:"text"` -} - -type Suggestion struct { - Length int `json:"length"` - Offset int `json:"offset"` - Options []SuggestionOption `json:"options"` - Text string `json:"text"` -} - -type Suggestions map[string][]Suggestion - -type SearchResult struct { - RawJSON []byte - Took int `json:"took"` - TimedOut bool `json:"timed_out"` - ShardStatus Status `json:"_shards"` - Hits Hits `json:"hits"` - Facets json.RawMessage `json:"facets,omitempty"` // structure varies on query - ScrollId string `json:"_scroll_id,omitempty"` - Aggregations json.RawMessage `json:"aggregations,omitempty"` // structure varies on query - Suggestions Suggestions `json:"suggest,omitempty"` -} - -func (s *SearchResult) String() string { - return fmt.Sprintf("", s.Took, s.TimedOut, s.Hits.Total) -} - -type Hits struct { - Total int `json:"total"` - // MaxScore float32 `json:"max_score"` - Hits []Hit `json:"hits"` -} - -func (h *Hits) Len() int { - return len(h.Hits) -} - -type Highlight map[string][]string - -type Hit struct { - Index string `json:"_index"` - Type string `json:"_type,omitempty"` - Id string `json:"_id"` - Score Float32Nullable `json:"_score,omitempty"` // Filters (no query) dont have score, so is null - Source *json.RawMessage `json:"_source"` // marshalling left to consumer - Fields *json.RawMessage `json:"fields"` // when a field arg is passed to ES, instead of _source it returns fields - Explanation *Explanation `json:"_explanation,omitempty"` - Highlight *Highlight `json:"highlight,omitempty"` - Sort []interface{} `json:"sort,omitempty"` -} - -func (e *Explanation) String(indent string) string { - if len(e.Details) == 0 { - return fmt.Sprintf("%s>>> %v = %s", indent, e.Value, strings.Replace(e.Description, "\n", "", -1)) - } else { - detailStrs := make([]string, 0) - for _, detail := range e.Details { - detailStrs = append(detailStrs, fmt.Sprintf("%s", detail.String(indent+"| "))) - } - return fmt.Sprintf("%s%v = %s(\n%s\n%s)", indent, 
e.Value, strings.Replace(e.Description, "\n", "", -1), strings.Join(detailStrs, "\n"), indent) - } -} - -// Elasticsearch returns some invalid (according to go) json, with floats having... -// -// json: cannot unmarshal null into Go value of type float32 (see last field.) -// -// "hits":{"total":6808,"max_score":null, -// "hits":[{"_index":"10user","_type":"user","_id":"751820","_score":null, -type Float32Nullable float32 - -func (i *Float32Nullable) UnmarshalJSON(data []byte) error { - if len(data) == 0 || string(data) == "null" { - return nil - } - - if in, err := strconv.ParseFloat(string(data), 32); err != nil { - return err - } else { - *i = Float32Nullable(in) - } - return nil -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch_test.go deleted file mode 100644 index a16d6fec2..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coresearch_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - . "github.com/smartystreets/goconvey/convey" - "testing" -) - -type SuggestTest struct { - Completion string `json:"completion"` -} - -type hash map[string]interface{} - -func TestCoreSearch(t *testing.T) { - - c := NewTestConn() - c.CreateIndex("github") - waitFor(func() bool { return false }, 5) - - defer func() { - c.DeleteIndex("github") - }() - - Convey("Convert a search result to JSON", t, func() { - - qry := map[string]interface{}{ - "query": map[string]interface{}{ - "wildcard": map[string]string{"actor": "a*"}, - }, - } - var args map[string]interface{} - out, err := c.Search("github", "", args, qry) - So(err, ShouldBeNil) - - _, err = json.Marshal(out.Hits.Hits) - So(err, ShouldBeNil) - }) - - Convey("Update a document and verify that it is reflected", t, func() { - mappingOpts := MappingOptions{Properties: hash{ - "completion": hash{ - "type": "completion", - }, - }} - err := c.PutMapping("github", "SuggestTest", SuggestTest{}, mappingOpts) - So(err, ShouldBeNil) - - _, err = c.UpdateWithPartialDoc("github", "SuggestTest", "1", nil, SuggestTest{"foobar"}, true) - So(err, ShouldBeNil) - - query := hash{"completion_completion": hash{ - "text": "foo", - "completion": hash{ - "size": 10, - "field": "completion", - }, - }} - - _, err = c.Refresh("github") - So(err, ShouldBeNil) - - res, err := c.Suggest("github", nil, query) - So(err, ShouldBeNil) - - opts, err := res.Result("completion_completion") - So(err, ShouldBeNil) - - So(len(opts[0].Options), ShouldBeGreaterThan, 0) - So(opts[0].Options[0].Text, ShouldEqual, "foobar") - }) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coretest_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coretest_test.go deleted file mode 100644 index 37aa3fc98..000000000 --- 
a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coretest_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/md5" - "encoding/json" - "flag" - "fmt" - "io" - "log" - "net/http" - "time" -) - -/* - -usage: - - test -v -host eshost -loaddata - -*/ - -const ( - testIndex = "github" -) - -var ( - bulkStarted bool - hasStartedTesting bool - hasLoadedData bool - sleepAfterLoad bool - loadData *bool = flag.Bool("loaddata", false, "This loads a bunch of test data into elasticsearch for testing") - sleep *int = flag.Int("sleep", 0, "Post bulk loading sleep test to make drone.io work") -) - -func InitTests(startIndexer bool) *Conn { - c := NewConn() - - if !hasStartedTesting { - flag.Parse() - hasStartedTesting = true - log.SetFlags(log.Ltime | log.Lshortfile) - c.Domain = *eshost - } - if startIndexer && !bulkStarted { - bulkStarted = true - b := c.NewBulkIndexer(100) - b.Start() - if *loadData && !hasLoadedData { - log.Println("loading test data ") - hasLoadedData = true - LoadTestData() - } - b.Stop() - } - c.Flush("_all") - c.Refresh("_all") - if !sleepAfterLoad { - time.Sleep(time.Duration(*sleep) * time.Second) - } - sleepAfterLoad = true - return c -} - -func NewTestConn() *Conn { - c := NewConn() - c.Domain = *eshost - return c -} - -// Wait for condition (defined by func) to be true, a utility to create a ticker -// checking every 100 ms to see if something (the supplied check func) is done -// -// waitFor(func() bool { -// return ctr.Ct == 0 -// }, 10) -// -// @timeout (in seconds) is the last arg -func waitFor(check func() bool, timeoutSecs int) { - timer := time.NewTicker(100 * time.Millisecond) - tryct := 0 - for _ = range timer.C { - if check() { - timer.Stop() - break - } - if tryct >= timeoutSecs*10 { - timer.Stop() - break - } - tryct++ - } -} - -type GithubEvent struct { - Url string - Created time.Time `json:"created_at"` - Type string -} - -// This loads test data from github archives (~6700 docs) -func LoadTestData() { - c := NewConn() - c.Domain = *eshost - - c.DeleteIndex(testIndex) - - docCt := 0 - errCt := 0 - indexer := c.NewBulkIndexer(1) - indexer.Sender = func(buf *bytes.Buffer) error { - // log.Printf("Sent %d bytes total %d docs sent", buf.Len(), docCt) - req, err := c.NewRequest("POST", "/_bulk", "") - if err != nil { - errCt += 1 - log.Fatalf("ERROR: %v", err) - return err - } - req.SetBody(buf) - // res, err := http.DefaultClient.Do(*(api.Request(req))) - var response map[string]interface{} - httpStatusCode, _, err := req.Do(&response) - if err != nil { - errCt += 1 - log.Fatalf("ERROR: %v", err) - return err - } - if httpStatusCode != 200 { - log.Fatalf("Not 200! 
%d %q\n", httpStatusCode, buf.String()) - } - return nil - } - indexer.Start() - resp, err := http.Get("http://data.githubarchive.org/2012-12-10-15.json.gz") - if err != nil || resp == nil { - panic("Could not download data") - } - defer resp.Body.Close() - if err != nil { - log.Println(err) - return - } - gzReader, err := gzip.NewReader(resp.Body) - defer gzReader.Close() - if err != nil { - panic(err) - } - r := bufio.NewReader(gzReader) - var ge GithubEvent - docsm := make(map[string]bool) - h := md5.New() - for { - line, err := r.ReadBytes('\n') - if err != nil { - if err == io.EOF { - indexer.Flush() - break - } - log.Fatalf("could not read line: %v", err) - } - if err := json.Unmarshal(line, &ge); err == nil { - // create an "ID" - h.Write(line) - id := fmt.Sprintf("%x", h.Sum(nil)) - if _, ok := docsm[id]; ok { - log.Println("HM, already exists? ", ge.Url) - } - docsm[id] = true - indexer.Index(testIndex, ge.Type, id, "", "", &ge.Created, line) - docCt++ - } else { - log.Println("ERROR? ", string(line)) - } - } - if errCt != 0 { - log.Println("FATAL, could not load ", errCt) - } - // lets wait a bit to ensure that elasticsearch finishes? - indexer.Stop() - if len(docsm) != docCt { - panic(fmt.Sprintf("Docs didn't match? %d:%d", len(docsm), docCt)) - } - c.Flush(testIndex) - c.Refresh(testIndex) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreupdate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreupdate.go deleted file mode 100644 index f6abbc14c..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/coreupdate.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" -) - -// Update updates a document based on a script provided. The operation gets the document -// (collocated with the shard) from the index, runs the script (with optional script language and parameters), -// and index back the result (also allows to delete, or ignore the operation). It uses versioning to make sure -// no updates have happened during the “get” and “reindex”. (available from 0.19 onwards). -// Note, this operation still means full reindex of the document, it just removes some network roundtrips -// and reduces chances of version conflicts between the get and the index. The _source field need to be enabled -// for this feature to work. 
-// -// http://www.elasticsearch.org/guide/reference/api/update.html -// TODO: finish this, it's fairly complex -func (c *Conn) Update(index string, _type string, id string, args map[string]interface{}, data interface{}) (BaseResponse, error) { - var url string - var retval BaseResponse - - url = fmt.Sprintf("/%s/%s/%s/_update", index, _type, id) - body, err := c.DoCommand("POST", url, args, data) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -// UpdateWithPartialDoc updates a document based on partial document provided. The update API also -// support passing a partial document (since 0.20), which will be merged into the existing -// document (simple recursive merge, inner merging of objects, replacing core "keys/values" and arrays). -// If both doc and script is specified, then doc is ignored. Best is to put your field pairs of the partial -// document in the script itself. -// -// http://www.elasticsearch.org/guide/reference/api/update.html -func (c *Conn) UpdateWithPartialDoc(index string, _type string, id string, args map[string]interface{}, doc interface{}, upsert bool) (BaseResponse, error) { - switch v := doc.(type) { - case string: - upsertStr := "" - if upsert { - upsertStr = ", \"doc_as_upsert\":true" - } - content := fmt.Sprintf("{\"doc\":%s %s}", v, upsertStr) - return c.Update(index, _type, id, args, content) - } - var data map[string]interface{} = make(map[string]interface{}) - data["doc"] = doc - if upsert { - data["doc_as_upsert"] = true - } - return c.Update(index, _type, id, args, data) -} - -// UpdateWithScript updates a document based on a script provided. -// The operation gets the document (collocated with the shard) from the index, runs the script -// (with optional script language and parameters), and index back the result (also allows to -// delete, or ignore the operation). It uses versioning to make sure no updates have happened -// during the "get" and "reindex". (available from 0.19 onwards). -// -// Note, this operation still means full reindex of the document, it just removes some network -// roundtrips and reduces chances of version conflicts between the get and the index. The _source -// field need to be enabled for this feature to work. -// http://www.elasticsearch.org/guide/reference/api/update.html -func (c *Conn) UpdateWithScript(index string, _type string, id string, args map[string]interface{}, script string, params interface{}) (BaseResponse, error) { - switch v := params.(type) { - case string: - paramsPart := fmt.Sprintf("{\"params\":%s}", v) - data := fmt.Sprintf("{\"script\":\"%s\", \"params\":%s}", script, paramsPart) - return c.Update(index, _type, id, args, data) - } - var data map[string]interface{} = make(map[string]interface{}) - data["params"] = params - data["script"] = script - return c.Update(index, _type, id, args, data) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corevalidate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corevalidate.go deleted file mode 100644 index d1881584f..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/corevalidate.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
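// Usage sketches for the Search/Scroll helpers (coresearch.go) and the update
// helpers (coreupdate.go) shown above; the "github" and "users" index names,
// types, and field names are illustrative assumptions.

// examplePageAll pages through every document of an index with the scroll API:
// the first Search carries a "scroll" argument, then each Scroll call feeds
// back the ScrollId from the previous page until a page comes back empty.
func examplePageAll(c *Conn) (int, error) {
	args := map[string]interface{}{"scroll": "1m", "size": 100}
	query := map[string]interface{}{
		"query": map[string]interface{}{"match_all": map[string]interface{}{}},
	}
	res, err := c.Search("github", "", args, query)
	if err != nil {
		return 0, err
	}
	seen := 0
	for len(res.Hits.Hits) > 0 {
		seen += len(res.Hits.Hits)
		res, err = c.Scroll(map[string]interface{}{"scroll": "1m"}, res.ScrollId)
		if err != nil {
			return seen, err
		}
	}
	return seen, nil
}

// exampleUpdates merges a partial document (upserting if absent), then runs a
// parameterized script against the same document.
func exampleUpdates(c *Conn) error {
	if _, err := c.UpdateWithPartialDoc("users", "user", "42", nil,
		map[string]interface{}{"active": true}, true); err != nil {
		return err
	}
	_, err := c.UpdateWithScript("users", "user", "42", nil,
		"ctx._source.visits += inc", map[string]interface{}{"inc": 1})
	return err
}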
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" -) - -// Validate allows a user to validate a potentially expensive query without executing it. -// see http://www.elasticsearch.org/guide/reference/api/validate.html -func (c *Conn) Validate(index string, _type string, args map[string]interface{}) (BaseResponse, error) { - var url string - var retval BaseResponse - if len(_type) > 0 { - url = fmt.Sprintf("/%s/%s/_validate/", index, _type) - } else { - url = fmt.Sprintf("/%s/_validate/", index) - } - body, err := c.DoCommand("GET", url, args, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -type Validation struct { - Valid bool `json:"valid"` - Shards Status `json:"_shards"` - Explainations []Explaination `json:"explanations,omitempty"` -} - -type Explaination struct { - Index string `json:"index"` - Valid bool `json:"valid"` - Error string `json:"error"` -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/error.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/error.go deleted file mode 100644 index a3a54c660..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/error.go +++ /dev/null @@ -1,8 +0,0 @@ -package elastigo - -import ( - "errors" -) - -// 404 Response. -var RecordNotFound = errors.New("record not found") diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesaliases.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesaliases.go deleted file mode 100644 index 0328ed9c2..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesaliases.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http:www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" -) - -type JsonAliases struct { - Actions []JsonAliasAdd `json:"actions"` -} - -type JsonAliasAdd struct { - Add JsonAlias `json:"add"` -} - -type JsonAlias struct { - Index string `json:"index"` - Alias string `json:"alias"` -} - -// The API allows you to create an index alias through an API. 
-func (c *Conn) AddAlias(index string, alias string) (BaseResponse, error) { - var url string - var retval BaseResponse - - if len(index) > 0 { - url = "/_aliases" - } else { - return retval, fmt.Errorf("You must specify an index to create the alias on") - } - - jsonAliases := JsonAliases{} - jsonAliasAdd := JsonAliasAdd{} - jsonAliasAdd.Add.Alias = alias - jsonAliasAdd.Add.Index = index - jsonAliases.Actions = append(jsonAliases.Actions, jsonAliasAdd) - requestBody, err := json.Marshal(jsonAliases) - - if err != nil { - return retval, err - } - - body, err := c.DoCommand("POST", url, nil, requestBody) - if err != nil { - return retval, err - } - - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesanalyze.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesanalyze.go deleted file mode 100644 index 70d9e7ef2..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesanalyze.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "errors" - "fmt" -) - -// AnalyzeIndices performs the analysis process on a text and return the tokens breakdown of the text. -// http://www.elasticsearch.org/guide/reference/api/admin-indices-analyze/ -func (c *Conn) AnalyzeIndices(index string, args map[string]interface{}) (AnalyzeResponse, error) { - var retval AnalyzeResponse - if len(args["text"].(string)) == 0 { - return retval, errors.New("text to analyze must not be blank") - } - var analyzeUrl string = "/_analyze" - if len(index) > 0 { - analyzeUrl = fmt.Sprintf("/%s/%s", index, analyzeUrl) - } - - body, err := c.DoCommand("GET", analyzeUrl, args, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} - -type AnalyzeResponse struct { - Tokens []Token `json:"tokens"` -} -type Token struct { - Name string `json:"token"` - StartOffset int `json:"start_offset"` - EndOffset int `json:"end_offset"` - Type string `json:"type"` - Position int `json:"position"` -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesclearcache.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesclearcache.go deleted file mode 100644 index 93806ac71..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesclearcache.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
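// A sketch exercising the Validate (corevalidate.go), AddAlias, and
// AnalyzeIndices helpers above; the index name, alias, query string, and
// sample text are illustrative, and passing the query via the "q" URL
// parameter is an assumption about the validate endpoint.
func exampleAdminCalls(c *Conn) error {
	// Validate a query against "logs" without executing it.
	if _, err := c.Validate("logs", "", map[string]interface{}{"q": "level:error"}); err != nil {
		return err
	}
	// Point the "logs-current" alias at the "logs" index.
	if _, err := c.AddAlias("logs", "logs-current"); err != nil {
		return err
	}
	// Run the analysis chain over a sample string; AnalyzeIndices requires a
	// non-empty "text" argument.
	tokens, err := c.AnalyzeIndices("logs", map[string]interface{}{"text": "Quick Brown Fox"})
	if err != nil {
		return err
	}
	_ = tokens.Tokens // each Token carries the term, offsets, type, and position
	return nil
}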
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-	"strings"
-)
-
-// ClearCache clears either all caches or specific caches associated with one or more indices.
-// see http://www.elasticsearch.org/guide/reference/api/admin-indices-clearcache/
-func (c *Conn) ClearCache(clearId bool, clearBloom bool, args map[string]interface{}, indices ...string) (ExtendedStatus, error) {
-	var retval ExtendedStatus
-	var clearCacheUrl string
-	if len(indices) > 0 {
-		clearCacheUrl = fmt.Sprintf("/%s/_cache/clear", strings.Join(indices, ","))
-
-	} else {
-		clearCacheUrl = fmt.Sprintf("/_cache/clear")
-	}
-
-	body, err := c.DoCommand("POST", clearCacheUrl, args, nil)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		// marshall into json
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-	return retval, err
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicescreateindex.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicescreateindex.go
deleted file mode 100644
index e9b876ef5..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicescreateindex.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-	"reflect"
-)
-
-// The create API allows you to create an index through an API.
-func (c *Conn) CreateIndex(index string) (BaseResponse, error) {
-	var url string
-	var retval BaseResponse
-
-	if len(index) > 0 {
-		url = fmt.Sprintf("/%s", index)
-	} else {
-		return retval, fmt.Errorf("You must specify an index to create")
-	}
-
-	body, err := c.DoCommand("PUT", url, nil, nil)
-	if err != nil {
-		return retval, err
-	}
-
-	jsonErr := json.Unmarshal(body, &retval)
-	if jsonErr != nil {
-		return retval, jsonErr
-	}
-
-	return retval, err
-}
-
-// The create API allows you to create an index through an API.
-func (c *Conn) CreateIndexWithSettings(index string, settings interface{}) (BaseResponse, error) {
-	var url string
-	var retval BaseResponse
-
-	settingsType := reflect.TypeOf(settings).Kind()
-	if settingsType != reflect.Struct && settingsType != reflect.Map {
-		return retval, fmt.Errorf("Settings kind was not struct or map")
-	}
-
-	requestBody, err := json.Marshal(settings)
-
-	if err != nil {
-		return retval, err
-	}
-
-	if len(index) > 0 {
-		url = fmt.Sprintf("/%s", index)
-	} else {
-		return retval, fmt.Errorf("You must specify an index to create")
-	}
-
-	body, err := c.DoCommand("PUT", url, nil, requestBody)
-	if err != nil {
-		return retval, err
-	}
-
-	jsonErr := json.Unmarshal(body, &retval)
-	if jsonErr != nil {
-		return retval, jsonErr
-	}
-
-	return retval, err
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeleteindex.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeleteindex.go
deleted file mode 100644
index 082ff07d4..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeleteindex.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-// The delete API allows you to delete one or more indices through an API. This operation may fail
-// if the elasticsearch configuration has been set to forbid deleting indexes.
-func (c *Conn) DeleteIndex(index string) (BaseResponse, error) {
-	var url string
-	var retval BaseResponse
-
-	if len(index) > 0 {
-		url = fmt.Sprintf("/%s", index)
-	} else {
-		return retval, fmt.Errorf("You must specify at least one index to delete")
-	}
-
-	body, err := c.DoCommand("DELETE", url, nil, nil)
-	if err != nil {
-		return retval, err
-	}
-
-	jsonErr := json.Unmarshal(body, &retval)
-	if jsonErr != nil {
-		return retval, jsonErr
-	}
-
-	return retval, err
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping.go
deleted file mode 100644
index f0ac94d21..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-// The delete API allows you to delete a mapping through an API.
-func (c *Conn) DeleteMapping(index string, typeName string) (BaseResponse, error) { - var retval BaseResponse - - if len(index) == 0 { - return retval, fmt.Errorf("You must specify at least one index to delete a mapping from") - } - - if len(typeName) == 0 { - return retval, fmt.Errorf("You must specify at least one mapping to delete") - } - - // As documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-mapping.html - url := fmt.Sprintf("/%s/%s", index, typeName) - - body, err := c.DoCommand("DELETE", url, nil, nil) - if err != nil { - return retval, err - } - - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - - return retval, err -} \ No newline at end of file diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go deleted file mode 100644 index 5f6575f9c..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package elastigo - -import ( - "testing" - "net/http/httptest" - "net/http" - "net/url" - "strings" -) - -func TestDeleteMapping(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != "DELETE" { - t.Errorf("Expected HTTP Verb, DELETE") - } - - if r.URL.Path == "/this/exists" { - w.Write([]byte(`{"acknowledged": true}`)) - } else if r.URL.Path == "/this/not_exists" { - w.WriteHeader(http.StatusNotFound) - w.Write([]byte(`{"error": "TypeMissingException[[_all] type[[not_exists]] missing: No index has the type.]","status": 404}`)) - } else { - t.Errorf("Unexpected request path, %s", r.URL.Path) - } - })) - defer ts.Close() - - serverURL, _ := url.Parse(ts.URL) - - c := NewTestConn() - - c.Domain = strings.Split(serverURL.Host, ":")[0] - c.Port = strings.Split(serverURL.Host, ":")[1] - - _, err := c.DeleteMapping("this","exists") - if err != nil { - t.Errorf("Expected no error and got, %s", err) - } - - _, err = c.DeleteMapping("this", "not_exists") - if err == nil { - t.Errorf("Expected error and got none deleting /this/not_exists") - } - - _, err = c.DeleteMapping("", "two") - if err == nil { - t.Errorf("Expected error for no index and got none") - } - - _, err = c.DeleteMapping("one", "") - if err == nil { - t.Errorf("Expected error for no mapping and got none") - } -} \ No newline at end of file diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdoc.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdoc.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesdoc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
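// A lifecycle sketch for the index helpers above (indicescreateindex.go,
// indicesdeletemapping.go, indicesdeleteindex.go): create an index with
// settings, drop a mapping, then delete the index. The "scratch" index, the
// "events" type, and the settings map are illustrative.
func exampleIndexLifecycle(c *Conn) error {
	settings := map[string]interface{}{
		"settings": map[string]interface{}{"number_of_shards": 1},
	}
	if _, err := c.CreateIndexWithSettings("scratch", settings); err != nil {
		return err
	}
	// DeleteMapping issues DELETE /scratch/events and fails fast when either
	// argument is empty.
	if _, err := c.DeleteMapping("scratch", "events"); err != nil {
		return err
	}
	_, err := c.DeleteIndex("scratch")
	return err
}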
- -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesflush.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesflush.go deleted file mode 100644 index 9fe7bc99c..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesflush.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - "strings" -) - -// Flush flushes one or more indices through an API. The flush process of an index basically -// frees memory from the index by flushing data to the index storage and clearing the internal transaction -// log. By default, ElasticSearch uses memory heuristics in order to automatically trigger flush operations -// as required in order to clear memory. -// http://www.elasticsearch.org/guide/reference/api/admin-indices-flush.html -// TODO: add Shards to response -func (c *Conn) Flush(indices ...string) (BaseResponse, error) { - var url string - var retval BaseResponse - if len(indices) > 0 { - url = fmt.Sprintf("/%s/_flush", strings.Join(indices, ",")) - } else { - url = "/_flush" - } - body, err := c.DoCommand("POST", url, nil, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesgetsettings.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesgetsettings.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesgetsettings.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesindicesexists.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesindicesexists.go deleted file mode 100644 index b1a7ea30c..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesindicesexists.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
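// A maintenance sketch for the Flush helper above and the ClearCache helper
// from indicesclearcache.go: flush the transaction log, then clear caches for
// the same (illustrative) indices.
func exampleMaintenance(c *Conn) error {
	if _, err := c.Flush("logs", "metrics"); err != nil {
		return err
	}
	// The two booleans are currently unused by ClearCache's URL construction;
	// cache selection is driven entirely by args.
	_, err := c.ClearCache(false, false, nil, "logs", "metrics")
	return err
}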
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"fmt"
-	"strings"
-)
-
-// IndicesExists checks whether the given indices exist. It returns (false, nil) when the
-// server reports RecordNotFound, (true, nil) when the indices exist, and (true, err) for
-// any other error, erring on the side of existence.
-// see http://www.elasticsearch.org/guide/reference/api/admin-indices-indices-exists/
-func (c *Conn) IndicesExists(indices ...string) (bool, error) {
-	var url string
-	if len(indices) > 0 {
-		url = fmt.Sprintf("/%s", strings.Join(indices, ","))
-	}
-	_, err := c.DoCommand("HEAD", url, nil, nil)
-	if err != nil {
-		if err == RecordNotFound {
-			return false, nil
-		} else {
-			return true, err
-		}
-	}
-	return true, nil
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesopencloseindex.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesopencloseindex.go
deleted file mode 100644
index 256d776a9..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesopencloseindex.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
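// A guard-clause sketch for the IndicesExists helper above: create an
// (illustrative) index only when it is missing, using the CreateIndex helper
// from indicescreateindex.go.
func ensureIndex(c *Conn, index string) error {
	exists, err := c.IndicesExists(index)
	if err != nil {
		return err
	}
	if !exists {
		_, err = c.CreateIndex(index)
	}
	return err
}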
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-func (c *Conn) OpenIndices() (BaseResponse, error) {
-	return c.openCloseOperation("_all", "_open")
-}
-
-func (c *Conn) CloseIndices() (BaseResponse, error) {
-	return c.openCloseOperation("_all", "_close")
-}
-
-func (c *Conn) OpenIndex(index string) (BaseResponse, error) {
-	return c.openCloseOperation(index, "_open")
-}
-
-func (c *Conn) CloseIndex(index string) (BaseResponse, error) {
-	return c.openCloseOperation(index, "_close")
-}
-
-func (c *Conn) openCloseOperation(index, mode string) (BaseResponse, error) {
-	var url string
-	var retval BaseResponse
-
-	if len(index) > 0 {
-		url = fmt.Sprintf("/%s/%s", index, mode)
-	} else {
-		url = fmt.Sprintf("/%s", mode)
-	}
-
-	body, errDo := c.DoCommand("POST", url, nil, nil)
-	if errDo != nil {
-		return retval, errDo
-	}
-	jsonErr := json.Unmarshal(body, &retval)
-	if jsonErr != nil {
-		return retval, jsonErr
-	}
-	return retval, errDo
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesoptimize.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesoptimize.go
deleted file mode 100644
index 559bea0eb..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesoptimize.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-	"strings"
-)
-
-// OptimizeIndices optimizes one or more indices, merging their segments to reduce resource usage.
-// http://www.elasticsearch.org/guide/reference/api/admin-indices-optimize/
-func (c *Conn) OptimizeIndices(args map[string]interface{}, indices ...string) (ExtendedStatus, error) {
-	var retval ExtendedStatus
-	var optimizeUrl string = "/_optimize"
-	if len(indices) > 0 {
-		optimizeUrl = fmt.Sprintf("/%s%s", strings.Join(indices, ","), optimizeUrl)
-	}
-
-	body, err := c.DoCommand("POST", optimizeUrl, args, nil)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		// marshall into json
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-	return retval, err
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping.go
deleted file mode 100644
index aab0d228c..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
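// A sketch for the open/close and optimize helpers above: close an index,
// reopen it, then trigger a segment merge capped at an (illustrative)
// max_num_segments value passed through args; the "logs" index is assumed.
func exampleOpenCloseOptimize(c *Conn) error {
	if _, err := c.CloseIndex("logs"); err != nil {
		return err
	}
	if _, err := c.OpenIndex("logs"); err != nil {
		return err
	}
	_, err := c.OptimizeIndices(map[string]interface{}{"max_num_segments": 1}, "logs")
	return err
}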
-// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" -) - -type Mapping map[string]MappingOptions - -type MappingOptions struct { - Id IdOptions `json:"_id"` - Timestamp TimestampOptions `json:"_timestamp"` - Analyzer *AnalyzerOptions `json:"_analyzer,omitempty"` - Parent *ParentOptions `json:"_parent,omitempty"` - Routing *RoutingOptions `json:"_routing,omitempty"` - Size *SizeOptions `json:"_size,omitempty"` - Source *SourceOptions `json:"_source,omitempty"` - TTL *TTLOptions `json:"_ttl,omitempty"` - Type *TypeOptions `json:"_type,omitempty"` - Properties map[string]interface{} `json:"properties"` -} - -type TimestampOptions struct { - Enabled bool `json:"enabled"` -} - -type AnalyzerOptions struct { - Path string `json:"path,omitempty"` - Index string `json:"index,omitempty"` -} - -type ParentOptions struct { - Type string `json:"type"` -} - -type RoutingOptions struct { - Required bool `json:"required,omitempty"` - Path string `json:"path,omitempty"` -} - -type SizeOptions struct { - Enabled bool `json:"enabled,omitempty"` - Store bool `json:"store,omitempty"` -} - -type SourceOptions struct { - Enabled bool `json:"enabled,omitempty"` - Includes []string `json:"includes,omitempty"` - Excludes []string `json:"excludes,omitempty"` -} - -type TypeOptions struct { - Store bool `json:"store,omitempty"` - Index string `json:"index,omitempty"` -} - -type TTLOptions struct { - Enabled bool `json:"enabled"` - Default string `json:"default,omitempty"` -} - -type IdOptions struct { - Index string `json:"index,omitempty"` - Path string `json:"path,omitempty"` -} - -func (m_ Mapping) Options() MappingOptions { - m := map[string]MappingOptions(m_) - for _, v := range m { - return v - } - panic(fmt.Errorf("Malformed input: %v", m_)) -} - -func MappingForType(typeName string, opts MappingOptions) Mapping { - return map[string]MappingOptions{typeName: opts} -} - -func (c *Conn) PutMapping(index string, typeName string, instance interface{}, opt MappingOptions) error { - instanceType := reflect.TypeOf(instance) - if instanceType.Kind() != reflect.Struct { - return fmt.Errorf("instance kind was not struct") - } - - if opt.Properties == nil { - opt.Properties = make(map[string]interface{}) - } - getProperties(instanceType, opt.Properties) - body, err := json.Marshal(MappingForType(typeName, opt)) - if err != nil { - return err - } - _, err = c.DoCommand("PUT", fmt.Sprintf("/%s/%s/_mapping", index, typeName), nil, string(body)) - if err != nil { - return err - } - - return nil -} - -//Same as PutMapping, but takes a []byte for mapping and provides no check of structure -func (c *Conn) PutMappingFromJSON(index string, typeName string, mapping []byte) error { - _, err := c.DoCommand("PUT", fmt.Sprintf("/%s/%s/_mapping", index, typeName), nil, string(mapping)) - return err -} - -func getProperties(t reflect.Type, prop map[string]interface{}) { - n := t.NumField() - for i := 0; i < n; i++ { - field := t.Field(i) - - name := strings.Split(field.Tag.Get("json"), ",")[0] - if name == "-" { - continue - } else if name == "" { - name = field.Name - } - - attrMap := make(map[string]interface{}) - attrs := splitTag(field.Tag.Get("elastic")) - for _, attr := range attrs { - keyvalue := strings.Split(attr, ":") - attrMap[keyvalue[0]] = keyvalue[1] - } - - if len(attrMap) == 0 || attrMap["type"] == "nested" { - - // We are looking for tags on any inner struct, independently of - // 
whether the field is a struct, a pointer to struct, or a slice of structs - targetType := field.Type - if targetType.Kind() == reflect.Ptr || - targetType.Kind() == reflect.Slice { - targetType = field.Type.Elem() - } - if targetType.Kind() == reflect.Struct { - if field.Anonymous { - getProperties(targetType, prop) - } else { - innerStructProp := make(map[string]interface{}) - getProperties(targetType, innerStructProp) - attrMap["properties"] = innerStructProp - } - } - } - if len(attrMap) != 0 { - prop[name] = attrMap - } - } -} - -func splitTag(tag string) []string { - tag = strings.Trim(tag, " ") - if tag == "" { - return []string{} - } else { - return strings.Split(tag, ",") - } -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go deleted file mode 100644 index 8da3ca346..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "sort" - "strings" - "testing" -) - -var ( - mux *http.ServeMux - server *httptest.Server -) - -func setup(t *testing.T) *Conn { - mux = http.NewServeMux() - server = httptest.NewServer(mux) - c := NewTestConn() - - serverURL, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c.Domain = strings.Split(serverURL.Host, ":")[0] - c.Port = strings.Split(serverURL.Host, ":")[1] - - return c -} - -func teardown() { - server.Close() -} - -type TestStruct struct { - Id string `json:"id" elastic:"index:not_analyzed"` - DontIndex string `json:"dontIndex" elastic:"index:no"` - Number int `json:"number" elastic:"type:integer,index:analyzed"` - Omitted string `json:"-"` - NoJson string `elastic:"type:string"` - unexported string - JsonOmitEmpty string `json:"jsonOmitEmpty,omitempty" elastic:"type:string"` - Embedded - Inner InnerStruct `json:"inner"` - InnerP *InnerStruct `json:"pointer_to_inner"` - InnerS []InnerStruct `json:"slice_of_inner"` - MultiAnalyze string `json:"multi_analyze"` - NestedObject NestedStruct `json:"nestedObject" elastic:"type:nested"` -} - -type Embedded struct { - EmbeddedField string `json:"embeddedField" elastic:"type:string"` -} - -type InnerStruct struct { - InnerField string `json:"innerField" elastic:"type:date"` -} - -type NestedStruct struct { - InnerField string `json:"innerField" elastic:"type:date"` -} - -// Sorting string -// RuneSlice implements sort.Interface (http://golang.org/pkg/sort/#Interface) -type RuneSlice []rune - -func (p RuneSlice) Len() int { return len(p) } -func (p RuneSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p RuneSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// sorted func returns string with sorted characters -func sorted(s string) string { - 
runes := []rune(s) - sort.Sort(RuneSlice(runes)) - return string(runes) -} - -func TestPutMapping(t *testing.T) { - c := setup(t) - defer teardown() - - options := MappingOptions{ - Timestamp: TimestampOptions{Enabled: true}, - Id: IdOptions{Index: "analyzed", Path: "id"}, - Parent: &ParentOptions{Type: "testParent"}, - TTL: &TTLOptions{Enabled: true, Default: "1w"}, - Properties: map[string]interface{}{ - // special properties that can't be expressed as tags - "multi_analyze": map[string]interface{}{ - "type": "multi_field", - "fields": map[string]map[string]string{ - "ma_analyzed": {"type": "string", "index": "analyzed"}, - "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, - }, - }, - }, - } - expValue := MappingForType("myType", MappingOptions{ - Timestamp: TimestampOptions{Enabled: true}, - Id: IdOptions{Index: "analyzed", Path: "id"}, - Parent: &ParentOptions{Type: "testParent"}, - TTL: &TTLOptions{Enabled: true, Default: "1w"}, - Properties: map[string]interface{}{ - "NoJson": map[string]string{"type": "string"}, - "dontIndex": map[string]string{"index": "no"}, - "embeddedField": map[string]string{"type": "string"}, - "id": map[string]string{"index": "not_analyzed"}, - "jsonOmitEmpty": map[string]string{"type": "string"}, - "number": map[string]string{"index": "analyzed", "type": "integer"}, - "multi_analyze": map[string]interface{}{ - "type": "multi_field", - "fields": map[string]map[string]string{ - "ma_analyzed": {"type": "string", "index": "analyzed"}, - "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, - }, - }, - "inner": map[string]map[string]map[string]string{ - "properties": { - "innerField": {"type": "date"}, - }, - }, - "pointer_to_inner": map[string]map[string]map[string]string{ - "properties": { - "innerField": {"type": "date"}, - }, - }, - "slice_of_inner": map[string]map[string]map[string]string{ - "properties": { - "innerField": {"type": "date"}, - }, - }, - "nestedObject": map[string]interface{}{ - "type": "nested", - "properties": map[string]map[string]string{ - "innerField": {"type": "date"}, - }, - }, - }, - }) - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - var value map[string]interface{} - bd, err := ioutil.ReadAll(r.Body) - json.NewDecoder(strings.NewReader(string(bd))).Decode(&value) - expValJson, err := json.MarshalIndent(expValue, "", " ") - if err != nil { - t.Errorf("Got error: %v", err) - } - valJson, err := json.MarshalIndent(value, "", " ") - if err != nil { - t.Errorf("Got error: %v", err) - } - - if sorted(string(expValJson)) != sorted(string(valJson)) { - t.Errorf("Expected %s but got %s", string(expValJson), string(valJson)) - } - }) - - err := c.PutMapping("myIndex", "myType", TestStruct{}, options) - if err != nil { - t.Errorf("Error: %v", err) - } -} - -func TestPutMappingFromJSON(t *testing.T) { - c := setup(t) - defer teardown() - /* - options := MappingOptions{ - Timestamp: TimestampOptions{Enabled: true}, - Id: IdOptions{Index: "analyzed", Path: "id"}, - Parent: &ParentOptions{Type: "testParent"}, - Properties: map[string]interface{}{ - // special properties that can't be expressed as tags - "multi_analyze": map[string]interface{}{ - "type": "multi_field", - "fields": map[string]map[string]string{ - "ma_analyzed": {"type": "string", "index": "analyzed"}, - "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, - }, - }, - }, - } - */ - - options := `{ - "myType": { - "_id": { - "index": "analyzed", - "path": "id" - }, - "_timestamp": { - "enabled": true - }, - "_parent": { - "type": 
"testParent" - }, - "properties": { - "analyzed_string": { - "type": "string", - "index": "analyzed" - }, - "multi_analyze": { - "type": "multi_field", - "fields": { - "ma_analyzed": { - "type": "string", - "index": "analyzed" - }, - "ma_notanalyzed": { - "type": "string", - "index": "not_analyzed" - } - } - } - } - } - }` - - expValue := map[string]interface{}{ - "myType": map[string]interface{}{ - "_timestamp": map[string]interface{}{ - "enabled": true, - }, - "_id": map[string]interface{}{ - "index": "analyzed", - "path": "id", - }, - "_parent": map[string]interface{}{ - "type": "testParent", - }, - "properties": map[string]interface{}{ - "analyzed_string": map[string]string{ - "type": "string", - "index": "analyzed", - }, - "multi_analyze": map[string]interface{}{ - "type": "multi_field", - "fields": map[string]map[string]string{ - "ma_analyzed": {"type": "string", "index": "analyzed"}, - "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, - }, - }, - }, - }, - } - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - var value map[string]interface{} - bd, err := ioutil.ReadAll(r.Body) - err = json.Unmarshal(bd, &value) - if err != nil { - t.Errorf("Got error: %v", err) - } - expValJson, err := json.MarshalIndent(expValue, "", " ") - if err != nil { - t.Errorf("Got error: %v", err) - } - - valJson, err := json.MarshalIndent(value, "", " ") - if err != nil { - t.Errorf("Got error: %v", err) - } - - if sorted(string(expValJson)) != sorted(string(valJson)) { - t.Errorf("Expected %s but got %s", string(expValJson), string(valJson)) - } - }) - - err := c.PutMappingFromJSON("myIndex", "myType", []byte(options)) - if err != nil { - t.Errorf("Error: %v", err) - } -} - -type StructWithEmptyElasticTag struct { - Field string `json:"field" elastic:""` -} - -func TestPutMapping_empty_elastic_tag_is_accepted(t *testing.T) { - properties := map[string]interface{}{} - getProperties(reflect.TypeOf(StructWithEmptyElasticTag{}), properties) - if len(properties) != 0 { - t.Errorf("Expected empty properites but got: %v", properties) - } -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputsettings.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputsettings.go deleted file mode 100644 index a21cc698c..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesputsettings.go +++ /dev/null @@ -1,42 +0,0 @@ -package elastigo - -import ( - "encoding/json" - "fmt" - "reflect" -) - -func (c *Conn) PutSettings(index string, settings interface{}) (BaseResponse, error) { - - var url string - var retval BaseResponse - - settingsType := reflect.TypeOf(settings) - if settingsType.Kind() != reflect.Struct { - return retval, fmt.Errorf("Settings kind was not struct") - } - - if len(index) > 0 { - url = fmt.Sprintf("/%s/_settings", index) - } else { - url = fmt.Sprintf("/_settings") - } - - requestBody, err := json.Marshal(settings) - - if err != nil { - return retval, err - } - - body, errDo := c.DoCommand("PUT", url, nil, requestBody) - if errDo != nil { - return retval, errDo - } - - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesrefresh.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesrefresh.go deleted file mode 100644 index 26534484a..000000000 --- 
a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesrefresh.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - "strings" -) - -// Refresh explicitly refreshes one or more index, making all operations performed since -// the last refresh available for search. The (near) real-time capabilities depend on the index engine used. -// For example, the internal one requires refresh to be called, but by default a refresh is scheduled periodically. -// http://www.elasticsearch.org/guide/reference/api/admin-indices-refresh.html -// TODO: add Shards to response -func (c *Conn) Refresh(indices ...string) (BaseResponse, error) { - var url string - var retval BaseResponse - if len(indices) > 0 { - url = fmt.Sprintf("/%s/_refresh", strings.Join(indices, ",")) - } else { - url = "/_refresh" - } - body, err := c.DoCommand("POST", url, nil, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessegments.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessegments.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessegments.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessnapshot.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessnapshot.go deleted file mode 100644 index 3fea027b7..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicessnapshot.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - "strings" -) - -// Snapshot allows to explicitly perform a snapshot through the gateway of one or more indices (backup them). -// By default, each index gateway periodically snapshot changes, though it can be disabled and be controlled completely through this API. -// see http://www.elasticsearch.org/guide/reference/api/admin-indices-gateway-snapshot/ -func (c *Conn) Snapshot(indices ...string) (ExtendedStatus, error) { - var retval ExtendedStatus - var url string - if len(indices) > 0 { - url = fmt.Sprintf("/%s/_gateway/snapshot", strings.Join(indices, ",")) - - } else { - url = fmt.Sprintf("/_gateway/snapshot") - } - body, err := c.DoCommand("POST", url, nil, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstats.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstats.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstats.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstatus.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstatus.go deleted file mode 100644 index 38981a7c9..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesstatus.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - "strings" -) - -// Status lists status details of all indices or the specified index. 
-// http://www.elasticsearch.org/guide/reference/api/admin-indices-status.html -func (c *Conn) Status(args map[string]interface{}, indices ...string) (BaseResponse, error) { - var retval BaseResponse - var url string - if len(indices) > 0 { - url = fmt.Sprintf("/%s/_status", strings.Join(indices, ",")) - - } else { - url = "/_status" - } - body, err := c.DoCommand("GET", url, args, nil) - if err != nil { - return retval, err - } - if err == nil { - // marshall into json - jsonErr := json.Unmarshal(body, &retval) - if jsonErr != nil { - return retval, jsonErr - } - } - return retval, err -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicestemplates.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicestemplates.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicestemplates.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesupdatesettings.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesupdatesettings.go deleted file mode 100644 index e8227651a..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/indicesupdatesettings.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request.go deleted file mode 100644 index 4f72955eb..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package elastigo - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - - hostpool "github.com/bitly/go-hostpool" -) - -type Request struct { - *http.Client - *http.Request - hostResponse hostpool.HostPoolResponse -} - -func (r *Request) SetBodyJson(data interface{}) error { - body, err := json.Marshal(data) - if err != nil { - return err - } - r.SetBodyBytes(body) - r.Header.Set("Content-Type", "application/json") - return nil -} - -func (r *Request) SetBodyString(body string) { - r.SetBody(strings.NewReader(body)) -} - -func (r *Request) SetBodyBytes(body []byte) { - r.SetBody(bytes.NewReader(body)) -} - -func (r *Request) SetBody(body io.Reader) { - rc, ok := body.(io.ReadCloser) - if !ok && body != nil { - rc = ioutil.NopCloser(body) - } - r.Body = rc - r.ContentLength = -1 -} - -func (r *Request) Do(v interface{}) (int, []byte, error) { - response, bodyBytes, err := r.DoResponse(v) - if err != nil { - return -1, nil, err - } - return response.StatusCode, bodyBytes, err -} - -func (r *Request) DoResponse(v interface{}) (*http.Response, []byte, error) { - var client = r.Client - if client == nil { - client = http.DefaultClient - } - - res, err := client.Do(r.Request) - // Inform the HostPool of what happened to the request and allow it to update - r.hostResponse.Mark(err) - if err != nil { - return nil, nil, err - } - - defer res.Body.Close() - bodyBytes, err := ioutil.ReadAll(res.Body) - - if err != nil { - return nil, nil, err - } - - if res.StatusCode == 404 { - return nil, bodyBytes, RecordNotFound - } - - if res.StatusCode > 304 && v != nil { - jsonErr := json.Unmarshal(bodyBytes, v) - if jsonErr != nil { - return nil, nil, fmt.Errorf("Json response unmarshal error: [%s], response content: [%s]", jsonErr.Error(), string(bodyBytes)) - } - } - return res, bodyBytes, err -} - -func Escape(args map[string]interface{}) (s string, err error) { - vals := url.Values{} - for key, val := range args { - switch v := val.(type) { - case string: - vals.Add(key, v) - case bool: - vals.Add(key, strconv.FormatBool(v)) - case int, int32, int64: - vInt := reflect.ValueOf(v).Int() - vals.Add(key, strconv.FormatInt(vInt, 10)) - case float32, float64: - vFloat := reflect.ValueOf(v).Float() - vals.Add(key, strconv.FormatFloat(vFloat, 'f', -1, 32)) - case []string: - vals.Add(key, strings.Join(v, ",")) - default: - err = fmt.Errorf("Could not format URL argument: %s", key) - return - } - } - s = vals.Encode() - return -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request_test.go deleted file mode 100644 index fa173d6ec..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/request_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package elastigo - -import ( - "fmt" - "testing" - - "github.com/bmizerany/assert" -) - -func TestQueryString(t *testing.T) { - // Test nil argument - s, err := Escape(nil) - assert.T(t, s == "" && err == nil, fmt.Sprintf("Nil should not fail and yield empty string")) - - // Test single string argument - s, err = Escape(map[string]interface{}{"foo": "bar"}) - exp := "foo=bar" - assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) - - // Test single int argument - s, err = Escape(map[string]interface{}{"foo": int(1)}) - exp = "foo=1" - assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) - - // Test single int64 argument - s, err = Escape(map[string]interface{}{"foo": int64(1)}) - exp = "foo=1" - assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) - - // Test single int32 argument - s, err = Escape(map[string]interface{}{"foo": int32(1)}) - exp = "foo=1" - assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) - - // Test single float64 argument - s, err = Escape(map[string]interface{}{"foo": float64(3.141592)}) - exp = "foo=3.141592" - assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) - - // Test single float32 argument - s, err = Escape(map[string]interface{}{"foo": float32(3.141592)}) - exp = "foo=3.141592" - assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) - - // Test single []string argument - s, err = Escape(map[string]interface{}{"foo": []string{"bar", "baz"}}) - exp = "foo=bar%2Cbaz" - assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) - - // Test combination of all arguments - s, err = Escape(map[string]interface{}{ - "foo": "bar", - "bar": 1, - "baz": 3.141592, - "test": []string{"a", "b"}, - }) - // url.Values also orders arguments alphabetically. 
- exp = "bar=1&baz=3.141592&foo=bar&test=a%2Cb" - assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) - - // Test invalid datatype - s, err = Escape(map[string]interface{}{"foo": []int{}}) - assert.T(t, err != nil, fmt.Sprintf("Expected err to not be nil")) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate.go deleted file mode 100644 index b7412ff46..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate.go +++ /dev/null @@ -1,226 +0,0 @@ -package elastigo - -import "encoding/json" - -func Aggregate(name string) *AggregateDsl { - return &AggregateDsl{Name: name} -} - -type AggregateDsl struct { - Name string - TypeName string - Type interface{} - Filters *FilterWrap `json:"filters,omitempty"` - AggregatesVal map[string]*AggregateDsl `json:"aggregations,omitempty"` -} - -type FieldAggregate struct { - Field string `json:"field"` - Size *int `json:"size,omitempty"` -} - -/** - * Aggregates accepts n "sub-aggregates" to be applied to this aggregate - * - * agg := Aggregate("user").Term("user_id") - * agg.Aggregates( - * Aggregate("total_spent").Sum("price"), - * Aggregate("total_saved").Sum("discount"), - * ) - */ -func (d *AggregateDsl) Aggregates(aggs ...*AggregateDsl) *AggregateDsl { - if len(aggs) < 1 { - return d - } - if len(d.AggregatesVal) == 0 { - d.AggregatesVal = make(map[string]*AggregateDsl) - } - - for _, agg := range aggs { - d.AggregatesVal[agg.Name] = agg - } - return d -} - -func (d *AggregateDsl) Min(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "min" - return d -} - -func (d *AggregateDsl) Max(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "max" - return d -} - -func (d *AggregateDsl) Sum(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "sum" - return d -} - -func (d *AggregateDsl) Avg(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "avg" - return d -} - -func (d *AggregateDsl) Stats(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "stats" - return d -} - -func (d *AggregateDsl) ExtendedStats(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "extended_stats" - return d -} - -func (d *AggregateDsl) ValueCount(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "value_count" - return d -} - -func (d *AggregateDsl) Percentiles(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "percentiles" - return d -} - -type Cardinality struct { - Field string `json:"field"` - PrecisionThreshold float64 `json:"precision_threshold,omitempty"` - Rehash bool `json:"rehash,omitempty"` -} - -/** - * Cardinality( - * "field_name", - * true, - * 0, - * ) - */ -func (d *AggregateDsl) Cardinality(field string, rehash bool, threshold int) *AggregateDsl { - c := Cardinality{Field: field} - - // Only set if it's false, since the default is true - if !rehash { - c.Rehash = false - } - - if threshold > 0 { - c.PrecisionThreshold = float64(threshold) - } - d.Type = c - d.TypeName = "cardinality" - return d -} - -func (d *AggregateDsl) Global() *AggregateDsl { - d.Type = struct{}{} - d.TypeName = "global" - return d -} - -func (d *AggregateDsl) Filter(filters ...interface{}) *AggregateDsl { - - if len(filters) == 0 { - 
return d - } - - if d.Filters == nil { - d.Filters = NewFilterWrap() - } - - d.Filters.addFilters(filters) - return d -} - -func (d *AggregateDsl) Missing(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "missing" - return d -} - -func (d *AggregateDsl) Terms(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "terms" - return d -} - -func (d *AggregateDsl) TermsWithSize(field string, size int) *AggregateDsl { - d.Type = FieldAggregate{Field: field, Size: &size} - d.TypeName = "terms" - return d -} - -func (d *AggregateDsl) SignificantTerms(field string) *AggregateDsl { - d.Type = FieldAggregate{Field: field} - d.TypeName = "significant_terms" - return d -} - -type Histogram struct { - Field string `json:"field"` - Interval float64 `json:"interval"` -} - -func (d *AggregateDsl) Histogram(field string, interval int) *AggregateDsl { - d.Type = Histogram{ - Field: field, - Interval: float64(interval), - } - d.TypeName = "histogram" - return d -} - -type DateHistogram struct { - Field string `json:"field"` - Interval string `json:"interval"` -} - -func (d *AggregateDsl) DateHistogram(field, interval string) *AggregateDsl { - d.Type = DateHistogram{ - Field: field, - Interval: interval, - } - d.TypeName = "date_histogram" - return d -} - -func (d *AggregateDsl) MarshalJSON() ([]byte, error) { - return json.Marshal(d.toMap()) -} - -func (d *AggregateDsl) toMap() map[string]interface{} { - root := map[string]interface{}{} - - if d.Type != nil { - root[d.TypeName] = d.Type - } - aggregates := d.aggregatesMap() - - if d.Filters != nil { - root["filter"] = d.Filters - } - - if len(aggregates) > 0 { - root["aggregations"] = aggregates - } - return root - -} - -func (d *AggregateDsl) aggregatesMap() map[string]interface{} { - root := map[string]interface{}{} - - if len(d.AggregatesVal) > 0 { - for _, agg := range d.AggregatesVal { - root[agg.Name] = agg.toMap() - } - } - return root -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate_test.go deleted file mode 100644 index 331809ce6..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchaggregate_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package elastigo - -import ( - "encoding/json" - "reflect" - "testing" -) - -// Test all aggregate types and nested aggregations -func TestAggregateDsl(t *testing.T) { - - min := Aggregate("min_price").Min("price") - max := Aggregate("max_price").Max("price") - sum := Aggregate("sum_price").Sum("price") - avg := Aggregate("avg_price").Avg("price") - stats := Aggregate("stats_price").Stats("price") - extendedStats := Aggregate("extended_stats_price").ExtendedStats("price") - valueCount := Aggregate("value_count_price").ValueCount("price") - percentiles := Aggregate("percentiles_price").Percentiles("price") - cardinality := Aggregate("cardinality_price").Cardinality("price", true, 50) - global := Aggregate("global").Global() - missing := Aggregate("missing_price").Missing("price") - terms := Aggregate("terms_price").Terms("price") - termsSize := Aggregate("terms_price_size").TermsWithSize("price", 0) - significantTerms := Aggregate("significant_terms_price").SignificantTerms("price") - histogram := Aggregate("histogram_price").Histogram("price", 50) - - dateAgg := Aggregate("articles_over_time").DateHistogram("date", "month") - dateAgg.Aggregates( - min, - max, - sum, - avg, - stats, - 
extendedStats, - valueCount, - percentiles, - cardinality, - global, - missing, - terms, - termsSize, - significantTerms, - histogram, - ) - - qry := Search("github").Aggregates(dateAgg) - - marshaled, err := json.MarshalIndent(qry.AggregatesVal, "", " ") - if err != nil { - t.Errorf("Failed to marshal AggregatesVal: %s", err.Error()) - return - } - - assertJsonMatch( - t, - marshaled, - []byte(` - { - "articles_over_time": { - "date_histogram" : { - "field" : "date", - "interval" : "month" - }, - "aggregations": { - "min_price":{ - "min": { "field": "price" } - }, - "max_price":{ - "max": { "field": "price" } - }, - "sum_price":{ - "sum": { "field": "price" } - }, - "avg_price": { - "avg": { "field": "price" } - }, - "stats_price":{ - "stats": { "field": "price" } - }, - "extended_stats_price":{ - "extended_stats": { "field": "price" } - }, - "value_count_price":{ - "value_count": { "field": "price" } - }, - "percentiles_price":{ - "percentiles": { "field": "price" } - }, - "cardinality_price":{ - "cardinality": { "field": "price", "precision_threshold": 50 } - }, - "global":{ - "global": {} - }, - "missing_price":{ - "missing": { "field": "price" } - }, - "terms_price":{ - "terms": { "field": "price" } - }, - "terms_price_size":{ - "terms": { "field": "price", "size": 0 } - }, - "significant_terms_price":{ - "significant_terms": { "field": "price" } - }, - "histogram_price":{ - "histogram": { "field": "price", "interval": 50 } - } - } - } - } - `), - ) - -} - -func TestAggregateFilter(t *testing.T) { - - avg := Aggregate("avg_price").Avg("price") - - dateAgg := Aggregate("in_stock_products").Filter( - Filter().Range("stock", nil, 0, nil, nil, ""), - ) - - dateAgg.Aggregates( - avg, - ) - - qry := Search("github").Aggregates(dateAgg) - - marshaled, err := json.MarshalIndent(qry.AggregatesVal, "", " ") - if err != nil { - t.Errorf("Failed to marshal AggregatesVal: %s", err.Error()) - return - } - - assertJsonMatch( - t, - marshaled, - []byte(` - { - "in_stock_products" : { - "filter" : { - "range" : { "stock" : { "gt" : 0 } } - }, - "aggregations" : { - "avg_price" : { "avg" : { "field" : "price" } } - } - } - } - `), - ) -} - -func assertJsonMatch(t *testing.T, match, expected []byte) { - var m interface{} - var e interface{} - - err := json.Unmarshal(expected, &e) - if err != nil { - t.Errorf("Failed to unmarshal expectation: %s", err.Error()) - return - } - err = json.Unmarshal(match, &m) - if err != nil { - t.Errorf("Failed to unmarshal match: %s", err.Error()) - return - } - - if !reflect.DeepEqual(m, e) { - t.Errorf("Expected %s but got %s", string(expected), string(match)) - return - } - -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchdsl.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchdsl.go deleted file mode 100644 index 345a0e9dd..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchdsl.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -type SearchRequest struct { - From int `json:"from,omitempty"` - Size int `json:"size,omitempty"` - Query OneTermQuery `json:"query,omitempty"` - - Filter struct { - Term Term `json:"term"` - } `json:"filter,omitempty"` -} - -type Facets struct { - Tag struct { - Terms string `json:"terms"` - } `json:"tag"` -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet.go deleted file mode 100644 index 5eec68bce..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - - u "github.com/araddon/gou" -) - -var ( - _ = u.DEBUG -) - -/* -"facets": { - "terms": { - "terms": { - "field": [ - "@fields.category" - ], - "size": 25 - } - } -} - - -"facets": { - "actors": { "terms": {"field": ["actor"],"size": "10" }} - , "langauge": { "terms": {"field": ["repository.language"],"size": "10" }} -} - -*/ -func Facet() *FacetDsl { - return &FacetDsl{} -} - -func FacetRange(field string) *RangeDsl { - out := &RangeDsl{&RangeDef{}, nil} - out.RangeDef.Field = field - return out -} - -type FacetDsl struct { - size string - Terms map[string]*Term `json:"terms,omitempty"` - Ranges map[string]*RangeDsl `json:"terms,omitempty"` -} - -type RangeDsl struct { - RangeDef *RangeDef `json:"range,omitempty"` - FilterVal *FilterWrap `json:"facet_filter,omitempty"` -} - -type RangeDef struct { - Field string `json:"field,omitempty"` - Values []*RangeVal `json:"ranges,omitempty"` -} - -type RangeVal struct { - From string `json:"from,omitempty"` - To string `json:"to,omitempty"` -} - -func (m *RangeDsl) Range(from, to string) *RangeDsl { - if len(m.RangeDef.Values) == 0 { - m.RangeDef.Values = make([]*RangeVal, 0) - } - - m.RangeDef.Values = append(m.RangeDef.Values, &RangeVal{From: from, To: to}) - return m -} - -func (s *RangeDsl) Filter(fl ...interface{}) *RangeDsl { - if s.FilterVal == nil { - s.FilterVal = NewFilterWrap() - } - - s.FilterVal.addFilters(fl) - return s -} - -func (m *FacetDsl) Size(size string) *FacetDsl { - m.size = size - return m -} - -func (m *FacetDsl) Fields(fields ...string) *FacetDsl { - if len(fields) < 1 { - return m - } - if len(m.Terms) == 0 { - m.Terms = make(map[string]*Term) - } - m.Terms[fields[0]] = &Term{Terms{Fields: fields}, nil} - return m -} - -func (m *FacetDsl) Regex(field, match string) *FacetDsl { - if len(m.Terms) == 0 { - m.Terms = make(map[string]*Term) - } - m.Terms[field] = &Term{Terms{Fields: []string{field}, Regex: match}, nil} - return m -} - -func (m *FacetDsl) Term(t *Term) *FacetDsl { - if len(m.Terms) == 0 { - m.Terms = make(map[string]*Term) - } - m.Terms[t.Terms.Fields[0]] = t - return m -} - -func (m *FacetDsl) Range(r *RangeDsl) 
*FacetDsl { - if len(m.Ranges) == 0 { - m.Ranges = make(map[string]*RangeDsl) - } - m.Ranges[r.RangeDef.Field] = r - return m -} - -func (m *FacetDsl) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{} - for key, t := range m.Terms { - t.Terms.Size = m.size - data[key] = t - } - for key, r := range m.Ranges { - data[key] = r - } - return json.Marshal(&data) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet_test.go deleted file mode 100644 index 11e2664a8..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfacet_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "github.com/araddon/gou" - . "github.com/smartystreets/goconvey/convey" - "testing" -) - -func TestFacetRegex(t *testing.T) { - - c := NewTestConn() - PopulateTestDB(t, c) - defer TearDownTestDB(c) - - Convey("Facted regex query", t, func() { - - // This is a possible solution for auto-complete - out, err := Search("oilers").Size("0").Facet( - Facet().Regex("name", "[jk].*").Size("8"), - ).Result(c) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - - // Debug(string(out.Facets)) - fh := gou.NewJsonHelper([]byte(out.Facets)) - facets := fh.Helpers("/name/terms") - So(err, ShouldBeNil) - So(facets, ShouldNotBeNil) - So(len(facets), ShouldEqual, 4) - }) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter.go deleted file mode 100644 index 3d10ab57e..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package elastigo - -import ( - "encoding/json" - "fmt" - "github.com/araddon/gou" -) - -var ( - _ = gou.DEBUG -) - -// BoolClause represents aa bool (and/or) clause for use with FilterWrap -// Legacy, use new FilterOp functions instead -type BoolClause string - -// TermExecutionMode refers to how a terms (not term) filter should behave -// The acceptable options are all prefixed with TEM -// See https://www.elastic.co/guide/en/elasticsearch/reference/1.5/query-dsl-terms-filter.html -type TermExecutionMode string - -const ( - // TEMDefault default ES term filter behavior (plain) - TEMDefault TermExecutionMode = "" - // TEMPlain default ES term filter behavior - TEMPlain TermExecutionMode = "plain" - // TEMField field_data execution mode - TEMField TermExecutionMode = "field_data" - // TEMBool bool execution mode - TEMBool TermExecutionMode = "bool" - // TEMAnd and execution mode - TEMAnd TermExecutionMode = "and" - // TEMOr or execution mode - TEMOr TermExecutionMode = "or" -) - -// FilterClause is either a boolClause or FilterOp for use with FilterWrap -type FilterClause interface { - String() string -} - -// FilterWrap is the legacy struct for chaining multiple filters with a bool -// Legacy, use new FilterOp functions instead -type FilterWrap struct { - boolClause string - filters []interface{} -} - -// NewFilterWrap creates a new FilterWrap struct -func NewFilterWrap() *FilterWrap { - return &FilterWrap{filters: make([]interface{}, 0), boolClause: "and"} -} - -func (f *FilterWrap) String() string { - return fmt.Sprintf(`fopv: %d:%v`, len(f.filters), f.filters) -} - -// Bool sets the type of boolean filter to use. -// Accepted values are "and" and "or". -// Legacy, use new FilterOp functions instead -func (f *FilterWrap) Bool(s string) { - f.boolClause = s -} - -// Custom marshalling to support the query dsl -func (f *FilterWrap) addFilters(fl []interface{}) { - if len(fl) > 1 { - fc := fl[0] - switch fc.(type) { - case BoolClause, string: - f.boolClause = fc.(string) - fl = fl[1:] - } - } - f.filters = append(f.filters, fl...) 
-} - -// MarshalJSON override for FilterWrap to match the expected ES syntax with the bool at the root -func (f *FilterWrap) MarshalJSON() ([]byte, error) { - var root interface{} - if len(f.filters) > 1 { - root = map[string]interface{}{f.boolClause: f.filters} - } else if len(f.filters) == 1 { - root = f.filters[0] - } - return json.Marshal(root) -} - -/* - "filter": { - "range": { - "@timestamp": { - "from": "2012-12-29T16:52:48+00:00", - "to": "2012-12-29T17:52:48+00:00" - } - } - } - "filter": { - "missing": { - "field": "repository.name" - } - } - - "filter" : { - "terms" : { - "user" : ["kimchy", "elasticsearch"], - "execution" : "bool", - "_cache": true - } - } - - "filter" : { - "term" : { "user" : "kimchy"} - } - - "filter" : { - "and" : [ - { - "range" : { - "postDate" : { - "from" : "2010-03-01", - "to" : "2010-04-01" - } - } - }, - { - "prefix" : { "name.second" : "ba" } - } - ] - } - -*/ - - -// Filter creates a blank FilterOp that can be customized with further function calls -// This is the starting point for constructing any filter query -// Examples: -// -// Filter().Term("user","kimchy") -// -// // we use variadics to allow n arguments, first is the "field" rest are values -// Filter().Terms("user", "kimchy", "elasticsearch") -// -// Filter().Exists("repository.name") -func Filter() *FilterOp { - return &FilterOp{} -} - -// CompoundFilter creates a complete FilterWrap given multiple filters -// Legacy, use new FilterOp functions instead -func CompoundFilter(fl ...interface{}) *FilterWrap { - FilterVal := NewFilterWrap() - FilterVal.addFilters(fl) - return FilterVal -} - -// FilterOp holds all the information for a filter query -// Properties should not be set directly, but instead via the fluent-style API. -type FilterOp struct { - TermsMap map[string]interface{} `json:"terms,omitempty"` - TermMap map[string]interface{} `json:"term,omitempty"` - RangeMap map[string]RangeFilter `json:"range,omitempty"` - ExistsProp *propertyPathMarker `json:"exists,omitempty"` - MissingProp *propertyPathMarker `json:"missing,omitempty"` - AndFilters []*FilterOp `json:"and,omitempty"` - OrFilters []*FilterOp `json:"or,omitempty"` - NotFilters []*FilterOp `json:"not,omitempty"` - LimitProp *LimitFilter `json:"limit,omitempty"` - TypeProp *TypeFilter `json:"type,omitempty"` - IdsProp *IdsFilter `json:"ids,omitempty"` - ScriptProp *ScriptFilter `json:"script,omitempty"` - GeoDistMap map[string]interface{} `json:"geo_distance,omitempty"` - GeoDistRangeMap map[string]interface{} `json:"geo_distance_range,omitempty"` -} - -type propertyPathMarker struct { - Field string `json:"field"` -} - -// LimitFilter holds the Limit filter information -// Value: number of documents to limit -type LimitFilter struct { - Value int `json:"value"` -} - -// TypeFilter filters on the document type -// Value: the document type to filter -type TypeFilter struct { - Value string `json:"value"` -} - -// IdsFilter holds the type and ids (on the _id field) to filter -// Type: a string or an array of string types. Optional. 
-// Values: Array of ids to match -type IdsFilter struct { - Type []string `json:"type,omitempty"` - Values []interface{} `json:"values,omitempty"` -} - -// ScriptFilter will filter using a custom javascript function -// Script: the javascript to run -// Params: map of custom parameters to pass into the function (JSON), if any -// IsCached: whether to cache the results of the filter -type ScriptFilter struct { - Script string `json:"script"` - Params map[string]interface{} `json:"params,omitempty"` - IsCached bool `json:"_cache,omitempty"` -} - -// RangeFilter filters given a range. Parameters need to be comparable for ES to accept. -// Only a minimum of one comparison parameter is required. You probably shouldn't mix GT and GTE parameters. -// Gte: the greater-than-or-equal to value. Should be a number or date. -// Lte: the less-than-or-equal to value. Should be a number or date. -// Gt: the greater-than value. Should be a number or date. -// Lt: the less-than value. Should be a number or date. -// TimeZone: the timezone to use (+|-h:mm format), if the other parameters are dates -type RangeFilter struct { - Gte interface{} `json:"gte,omitempty"` - Lte interface{} `json:"lte,omitempty"` - Gt interface{} `json:"gt,omitempty"` - Lt interface{} `json:"lt,omitempty"` - TimeZone string `json:"time_zone,omitempty"` //Ideally this would be an int -} - -// GeoLocation holds the coordinates for a geo query. Currently hashes are not supported. -type GeoLocation struct { - Latitude float32 `json:"lat"` - Longitude float32 `json:"lon"` -} - -// GeoField holds a GeoLocation and a field to match to. -// This exists so the struct will match the ES schema. -type GeoField struct { - GeoLocation - Field string -} - -// Term will add a term to the filter. -// Multiple Term filters can be added, and ES will OR them. -// If the term already exists in the FilterOp, the value will be overridden. -func (f *FilterOp) Term(field string, value interface{}) *FilterOp { - if len(f.TermMap) == 0 { - f.TermMap = make(map[string]interface{}) - } - - f.TermMap[field] = value - return f -} - -// And will add an AND op to the filter. One or more FilterOps can be passed in. -func (f *FilterOp) And(filters ...*FilterOp) *FilterOp { - if len(f.AndFilters) == 0 { - f.AndFilters = filters[:] - } else { - f.AndFilters = append(f.AndFilters, filters...) - } - - return f -} - -// Or will add an OR op to the filter. One or more FilterOps can be passed in. -func (f *FilterOp) Or(filters ...*FilterOp) *FilterOp { - if len(f.OrFilters) == 0 { - f.OrFilters = filters[:] - } else { - f.OrFilters = append(f.OrFilters, filters...) - } - - return f -} - -// Not will add a NOT op to the filter. One or more FilterOps can be passed in. -func (f *FilterOp) Not(filters ...*FilterOp) *FilterOp { - if len(f.NotFilters) == 0 { - f.NotFilters = filters[:] - - } else { - f.NotFilters = append(f.NotFilters, filters...) - } - - return f -} - -// GeoDistance will add a GEO DISTANCE op to the filter. -// distance: distance in ES distance format, i.e. "100km" or "100mi". -// fields: an array of GeoField origin coordinates. Only one coordinate needs to match. -func (f *FilterOp) GeoDistance(distance string, fields ...GeoField) *FilterOp { - f.GeoDistMap = make(map[string]interface{}) - f.GeoDistMap["distance"] = distance - for _, val := range fields { - f.GeoDistMap[val.Field] = val.GeoLocation - } - - return f -} - -// GeoDistanceRange will add a GEO DISTANCE RANGE op to the filter. -// from: minimum distance in ES distance format, i.e. 
"100km" or "100mi". -// to: maximum distance in ES distance format, i.e. "100km" or "100mi". -// fields: an array of GeoField origin coordinates. Only one coor -func (f *FilterOp) GeoDistanceRange(from string, to string, fields ...GeoField) *FilterOp { - f.GeoDistRangeMap = make(map[string]interface{}) - f.GeoDistRangeMap["from"] = from - f.GeoDistRangeMap["to"] = to - - for _, val := range fields { - f.GeoDistRangeMap[val.Field] = val.GeoLocation - } - - return f -} - -// NewGeoField is a helper function to create values for the GeoDistance filters -func NewGeoField(field string, latitude float32, longitude float32) GeoField { - return GeoField{ - GeoLocation: GeoLocation{Latitude: latitude, Longitude: longitude}, - Field: field} -} - -// Terms adds a TERMS op to the filter. -// field: the document field -// executionMode Term execution mode, starts with TEM -// values: array of values to match -// Note: you can only have one terms clause in a filter. Use a bool filter to combine multiple. -func (f *FilterOp) Terms(field string, executionMode TermExecutionMode, values ...interface{}) *FilterOp { - //You can only have one terms in a filter - f.TermsMap = make(map[string]interface{}) - - if executionMode != "" { - f.TermsMap["execution"] = executionMode - } - - f.TermsMap[field] = values - - return f -} - -// Range adds a range filter for the given field. -// See the RangeFilter struct documentation for information about the parameters. -func (f *FilterOp) Range(field string, gte interface{}, - gt interface{}, lte interface{}, lt interface{}, timeZone string) *FilterOp { - - if f.RangeMap == nil { - f.RangeMap = make(map[string]RangeFilter) - } - - f.RangeMap[field] = RangeFilter{ - Gte: gte, - Gt: gt, - Lte: lte, - Lt: lt, - TimeZone: timeZone} - - return f -} - -// Type adds a TYPE op to the filter. -func (f *FilterOp) Type(fieldType string) *FilterOp { - f.TypeProp = &TypeFilter{Value: fieldType} - return f -} - -// Ids adds a IDS op to the filter. -func (f *FilterOp) Ids(ids ...interface{}) *FilterOp { - f.IdsProp = &IdsFilter{Values: ids} - return f -} - -// IdsByTypes adds a IDS op to the filter, but also allows passing in an array of types for the query. -func (f *FilterOp) IdsByTypes(types []string, ids ...interface{}) *FilterOp { - f.IdsProp = &IdsFilter{Type: types, Values: ids} - return f -} - -// Exists adds an EXISTS op to the filter. -func (f *FilterOp) Exists(field string) *FilterOp { - f.ExistsProp = &propertyPathMarker{Field: field} - return f -} - -// Missing adds an MISSING op to the filter. -func (f *FilterOp) Missing(field string) *FilterOp { - f.MissingProp = &propertyPathMarker{Field: field} - return f -} - -// Limit adds an LIMIT op to the filter. -func (f *FilterOp) Limit(maxResults int) *FilterOp { - f.LimitProp = &LimitFilter{Value: maxResults} - return f -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter_test.go deleted file mode 100644 index a4931ddc6..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchfilter_test.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - . "github.com/smartystreets/goconvey/convey" - "testing" -) - -func TestFilterDsl(t *testing.T) { - Convey("And filter", t, func() { - filter := Filter().And(Filter().Term("test", "asdf")). - And(Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) - actual, err := GetJson(filter) - - actualFilters := actual["and"].([]interface{}) - - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So(2, ShouldEqual, len(actualFilters)) - So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) - So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) - }) - - Convey("Or filter", t, func() { - filter := Filter().Or(Filter().Term("test", "asdf"), Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) - actual, err := GetJson(filter) - - actualFilters := actual["or"].([]interface{}) - - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So(2, ShouldEqual, len(actualFilters)) - So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) - So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) - }) - - Convey("Not filter", t, func() { - filter := Filter().Not(Filter().Term("test", "asdf")). - Not(Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) - actual, err := GetJson(filter) - - actualFilters := actual["not"].([]interface{}) - - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So(2, ShouldEqual, len(actualFilters)) - So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) - So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) - }) - - Convey("Terms filter", t, func() { - filter := Filter().Terms("Sample", TEMAnd, "asdf", 123, true) - actual, err := GetJson(filter) - - actualTerms := actual["terms"].(map[string]interface{}) - actualValues := actualTerms["Sample"].([]interface{}) - - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So(3, ShouldEqual, len(actualValues)) - So(actualValues[0], ShouldEqual, "asdf") - So(actualValues[1], ShouldEqual, float64(123)) - So(actualValues[2], ShouldEqual, true) - So("and", ShouldEqual, actualTerms["execution"]) - }) - - Convey("Term filter", t, func() { - filter := Filter().Term("Sample", "asdf").Term("field2", 341.4) - actual, err := GetJson(filter) - - actualTerm := actual["term"].(map[string]interface{}) - - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So("asdf", ShouldEqual, actualTerm["Sample"]) - So(float64(341.4), ShouldEqual, actualTerm["field2"]) - }) - - Convey("Range filter", t, func() { - filter := Filter().Range("rangefield", 1, 2, 3, 4, "+08:00") - actual, err := GetJson(filter) - //A bit lazy, probably should assert keys exist - actualRange := actual["range"].(map[string]interface{})["rangefield"].(map[string]interface{}) - - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So(float64(1), ShouldEqual, actualRange["gte"]) - So(float64(2), ShouldEqual, actualRange["gt"]) - So(float64(3), ShouldEqual, actualRange["lte"]) - So(float64(4), ShouldEqual, actualRange["lt"]) - So("+08:00", ShouldEqual, 
actualRange["time_zone"]) - }) - - Convey("Exists filter", t, func() { - filter := Filter().Exists("field1") - actual, err := GetJson(filter) - - actualValue := actual["exists"].(map[string]interface{}) - - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So("field1", ShouldEqual, actualValue["field"]) - }) - - Convey("Missing filter", t, func() { - filter := Filter().Missing("field1") - actual, err := GetJson(filter) - - actualValue := actual["missing"].(map[string]interface{}) - - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So("field1", ShouldEqual, actualValue["field"]) - }) - - Convey("Limit filter", t, func() { - filter := Filter().Limit(100) - actual, err := GetJson(filter) - - actualValue := actual["limit"].(map[string]interface{}) - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So(float64(100), ShouldEqual, actualValue["value"]) - }) - - Convey("Type filter", t, func() { - filter := Filter().Type("my_type") - actual, err := GetJson(filter) - - actualValue := actual["type"].(map[string]interface{}) - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So("my_type", ShouldEqual, actualValue["value"]) - }) - - Convey("Ids filter", t, func() { - filter := Filter().Ids("test", "asdf", "fdsa") - actual, err := GetJson(filter) - - actualValue := actual["ids"].(map[string]interface{}) - actualValues := actualValue["values"].([]interface{}) - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So(nil, ShouldEqual, actualValue["type"]) - So(3, ShouldEqual, len(actualValues)) - So("test", ShouldEqual, actualValues[0]) - So("asdf", ShouldEqual, actualValues[1]) - So("fdsa", ShouldEqual, actualValues[2]) - }) - - Convey("IdsByTypes filter", t, func() { - filter := Filter().IdsByTypes([]string{"my_type"}, "test", "asdf", "fdsa") - actual, err := GetJson(filter) - - actualValue := actual["ids"].(map[string]interface{}) - actualTypes := actualValue["type"].([]interface{}) - actualValues := actualValue["values"].([]interface{}) - So(err, ShouldBeNil) - So(1, ShouldEqual, len(actual)) - So(1, ShouldEqual, len(actualTypes)) - So("my_type", ShouldEqual, actualTypes[0]) - So(3, ShouldEqual, len(actualValues)) - So("test", ShouldEqual, actualValues[0]) - So("asdf", ShouldEqual, actualValues[1]) - So("fdsa", ShouldEqual, actualValues[2]) - }) - - Convey("GeoDistance filter", t, func() { - filter := Filter().GeoDistance("100km", NewGeoField("pin.location", 32.3, 23.4)) - actual, err := GetJson(filter) - - actualValue := actual["geo_distance"].(map[string]interface{}) - actualLocation := actualValue["pin.location"].(map[string]interface{}) - So(err, ShouldBeNil) - So("100km", ShouldEqual, actualValue["distance"]) - So(float64(32.3), ShouldEqual, actualLocation["lat"]) - So(float64(23.4), ShouldEqual, actualLocation["lon"]) - }) - - Convey("GeoDistanceRange filter", t, func() { - filter := Filter().GeoDistanceRange("100km", "200km", NewGeoField("pin.location", 32.3, 23.4)) - actual, err := GetJson(filter) - - actualValue := actual["geo_distance_range"].(map[string]interface{}) - actualLocation := actualValue["pin.location"].(map[string]interface{}) - So(err, ShouldBeNil) - So("100km", ShouldEqual, actualValue["from"]) - So("200km", ShouldEqual, actualValue["to"]) - So(float64(32.3), ShouldEqual, actualLocation["lat"]) - So(float64(23.4), ShouldEqual, actualLocation["lon"]) - }) -} - -func TestFilters(t *testing.T) { - - c := NewTestConn() - PopulateTestDB(t, c) - defer TearDownTestDB(c) - - Convey("Exists filter", t, func() { - qry := Search("oilers").Filter( - 
Filter().Exists("goals"), - ) - out, err := qry.Result(c) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - So(out.Hits, ShouldNotBeNil) - So(out.Hits.Len(), ShouldEqual, 10) - So(out.Hits.Total, ShouldEqual, 12) - }) - - Convey("Missing filter", t, func() { - qry := Search("oilers").Filter( - Filter().Missing("goals"), - ) - out, err := qry.Result(c) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - So(out.Hits, ShouldNotBeNil) - So(out.Hits.Total, ShouldEqual, 2) - }) - - Convey("Terms filter", t, func() { - qry := Search("oilers").Filter( - Filter().Terms("pos", TEMDefault, "RW", "LW"), - ) - out, err := qry.Result(c) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - So(out.Hits, ShouldNotBeNil) - So(out.Hits.Total, ShouldEqual, 6) - }) - - Convey("Filter involving an AND", t, func() { - qry := Search("oilers").Filter( - Filter().And( - Filter().Terms("pos", TEMDefault, "LW"), - Filter().Exists("PIM"), - ), - ) - - out, err := qry.Result(c) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - So(out.Hits, ShouldNotBeNil) - So(out.Hits.Total, ShouldEqual, 2) - }) - - - Convey("Filterng filter results", t, func() { - qry := Search("oilers").Filter( - Filter().Terms("pos", TEMDefault, "LW"), - ) - qry.Filter( - Filter().Exists("PIM"), - ) - out, err := qry.Result(c) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - So(out.Hits, ShouldNotBeNil) - So(out.Hits.Total, ShouldEqual, 2) - }) - - Convey("Filter involving OR", t, func() { - qry := Search("oilers").Filter( - Filter().Or( - Filter().Terms("pos", TEMDefault, "G"), - Filter().Range("goals", nil, 80, nil, nil, ""), - ), - ) - out, err := qry.Result(c) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - So(out.Hits, ShouldNotBeNil) - So(out.Hits.Total, ShouldEqual, 3) - }) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight.go deleted file mode 100644 index ac74947d7..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight.go +++ /dev/null @@ -1,138 +0,0 @@ -package elastigo - -import "encoding/json" - -func NewHighlight() *HighlightDsl { - return &HighlightDsl{} -} - -type HighlightDsl struct { - Settings *HighlightEmbed `-` - TagSchema string `json:"tag_schema,omitempty"` - Fields map[string]HighlightEmbed `json:"fields,omitempty"` -} - -func NewHighlightOpts() *HighlightEmbed { - return &HighlightEmbed{} -} - -type HighlightEmbed struct { - BoundaryCharsVal string `json:"boundary_chars,omitempty"` - BoundaryMaxScanVal int `json:"boundary_max_scan,omitempty"` - PreTags []string `json:"pre_tags,omitempty"` - PostTags []string `json:"post_tags,omitempty"` - FragmentSizeVal int `json:"fragment_size,omitempty"` - NumOfFragmentsVal int `json:"number_of_fragments,omitempty"` - HighlightQuery *QueryDsl `json:"highlight_query,omitempty"` - MatchedFieldsVal []string `json:"matched_fields,omitempty"` - OrderVal string `json:"order,omitempty"` - TypeVal string `json:"type,omitempty"` -} - -// Custom marshalling -func (t *HighlightDsl) MarshalJSON() ([]byte, error) { - m := make(map[string]interface{}) - - if t.Fields != nil { - m["fields"] = t.Fields - } - - if t.TagSchema != "" { - m["tag_schema"] = t.TagSchema - } - - if t.Settings == nil { - return json.Marshal(m) - } - - //This is terrible :(, could use structs package to avoid extra serialization. 
-	embed, err := json.Marshal(t.Settings)
-	if err == nil {
-		err = json.Unmarshal(embed, &m)
-	}
-
-	if err == nil {
-		return json.Marshal(m)
-	}
-
-	return nil, err
-}
-
-func (h *HighlightDsl) AddField(name string, settings *HighlightEmbed) *HighlightDsl {
-	if h.Fields == nil {
-		h.Fields = make(map[string]HighlightEmbed)
-	}
-
-	if settings != nil {
-		h.Fields[name] = *settings
-	} else {
-		h.Fields[name] = HighlightEmbed{}
-	}
-
-	return h
-}
-
-func (h *HighlightDsl) Schema(schema string) *HighlightDsl {
-	h.TagSchema = schema
-	return h
-}
-
-func (h *HighlightDsl) SetOptions(options *HighlightEmbed) *HighlightDsl {
-	h.Settings = options
-	return h
-}
-
-func (o *HighlightEmbed) BoundaryChars(chars string) *HighlightEmbed {
-	o.BoundaryCharsVal = chars
-	return o
-}
-
-func (o *HighlightEmbed) BoundaryMaxScan(max int) *HighlightEmbed {
-	o.BoundaryMaxScanVal = max
-	return o
-}
-
-func (he *HighlightEmbed) FragSize(size int) *HighlightEmbed {
-	he.FragmentSizeVal = size
-	return he
-}
-
-func (he *HighlightEmbed) NumFrags(numFrags int) *HighlightEmbed {
-	he.NumOfFragmentsVal = numFrags
-	return he
-}
-
-func (he *HighlightEmbed) MatchedFields(fields ...string) *HighlightEmbed {
-	he.MatchedFieldsVal = fields
-	return he
-}
-
-func (he *HighlightEmbed) Order(order string) *HighlightEmbed {
-	he.OrderVal = order
-	return he
-}
-
-func (he *HighlightEmbed) Tags(pre string, post string) *HighlightEmbed {
-	if he == nil {
-		he = &HighlightEmbed{}
-	}
-
-	if he.PreTags == nil {
-		he.PreTags = []string{pre}
-	} else {
-		he.PreTags = append(he.PreTags, pre)
-	}
-
-	if he.PostTags == nil {
-		he.PostTags = []string{post}
-	} else {
-		he.PostTags = append(he.PostTags, post)
-	}
-
-	return he
-}
-
-func (he *HighlightEmbed) Type(highlightType string) *HighlightEmbed {
-	he.TypeVal = highlightType
-	return he
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight_test.go
deleted file mode 100644
index ca5b9304d..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchhighlight_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package elastigo
-
-import (
-	"github.com/bmizerany/assert"
-	"testing"
-)
-
-func TestEmbedDsl(t *testing.T) {
-	highlight := NewHighlight().SetOptions(NewHighlightOpts().
-		Tags("<b>", "</b>").
-		BoundaryChars("asdf").BoundaryMaxScan(100).
-		FragSize(10).NumFrags(50).
-		Order("order").Type("fdsa").
-		MatchedFields("1", "2"))
-
-	actual, err := GetJson(highlight)
-
-	assert.Equal(t, nil, err)
-	assert.Equal(t, "<b>", actual["pre_tags"].([]interface{})[0])
-	assert.Equal(t, "</b>", actual["post_tags"].([]interface{})[0])
-	assert.Equal(t, "asdf", actual["boundary_chars"])
-	assert.Equal(t, float64(100), actual["boundary_max_scan"])
-	assert.Equal(t, float64(10), actual["fragment_size"])
-	assert.Equal(t, float64(50), actual["number_of_fragments"])
-	assert.Equal(t, "1", actual["matched_fields"].([]interface{})[0])
-	assert.Equal(t, "2", actual["matched_fields"].([]interface{})[1])
-	assert.Equal(t, "order", actual["order"])
-	assert.Equal(t, "fdsa", actual["type"])
-}
-
-func TestFieldDsl(t *testing.T) {
-	highlight := NewHighlight().AddField("whatever", NewHighlightOpts().
-		Tags("<b>", "</b>").
-		BoundaryChars("asdf").BoundaryMaxScan(100).
-		FragSize(10).NumFrags(50).
-		Order("order").Type("fdsa").
-		MatchedFields("1", "2"))
-
-	result, err := GetJson(highlight)
-	actual := result["fields"].(map[string]interface{})["whatever"].(map[string]interface{})
-
-	assert.Equal(t, nil, err)
-	assert.Equal(t, "<b>", actual["pre_tags"].([]interface{})[0])
-	assert.Equal(t, "</b>", actual["post_tags"].([]interface{})[0])
-	assert.Equal(t, "asdf", actual["boundary_chars"])
-	assert.Equal(t, float64(100), actual["boundary_max_scan"])
-	assert.Equal(t, float64(10), actual["fragment_size"])
-	assert.Equal(t, float64(50), actual["number_of_fragments"])
-	assert.Equal(t, "1", actual["matched_fields"].([]interface{})[0])
-	assert.Equal(t, "2", actual["matched_fields"].([]interface{})[1])
-	assert.Equal(t, "order", actual["order"])
-	assert.Equal(t, "fdsa", actual["type"])
-}
-
-func TestEmbedAndFieldDsl(t *testing.T) {
-	highlight := NewHighlight().
-		SetOptions(NewHighlightOpts().Tags("<b>", "</b>")).
-		AddField("afield", NewHighlightOpts().Type("something"))
-
-	actual, err := GetJson(highlight)
-	actualField := actual["fields"].(map[string]interface{})["afield"].(map[string]interface{})
-
-	assert.Equal(t, nil, err)
-	assert.Equal(t, "<b>", actual["pre_tags"].([]interface{})[0])
-	assert.Equal(t, "</b>", actual["post_tags"].([]interface{})[0])
-	assert.Equal(t, "something", actualField["type"])
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchquery.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchquery.go
deleted file mode 100644
index dd01ed717..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchquery.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-	//"log"
-	"strings"
-)
-
-// Query creates a new Query Dsl
-func Query() *QueryDsl {
-	return &QueryDsl{}
-}
-
-/*
-
-some ways to serialize
-"query": {
-	"filtered": {
-		"query": {
-			"query_string": {
-				"default_operator": "OR",
-				"default_field": "_all",
-				"query": " actor:\"bob\" AND type:\"EventType\""
-			}
-		},
-		"filter": {
-			"range": {
-				"@timestamp": {
-					"from": "2012-12-29T16:52:48+00:00",
-					"to": "2012-12-29T17:52:48+00:00"
-				}
-			}
-		}
-	}
-},
-
-"query" : {
-	"term" : { "user" : "kimchy" }
-}
-
-"query" : {
-	"match_all" : {}
-},
-*/
-type QueryDsl struct {
-	QueryEmbed
-	FilterVal *FilterOp `json:"filter,omitempty"`
-}
-
-// The core Query Syntax can be embedded as a child of a variety of different parents
-type QueryEmbed struct {
-	MatchAll      *MatchAll              `json:"match_all,omitempty"`
-	Terms         map[string]string      `json:"term,omitempty"`
-	Qs            *QueryString           `json:"query_string,omitempty"`
-	MultiMatch    *MultiMatch            `json:"multi_match,omitempty"`
-	FunctionScore map[string]interface{} `json:"function_score,omitempty"`
-	//Exist string `json:"_exists_,omitempty"`
-}
-
-// MarshalJSON provides custom marshalling to support the query dsl which is a conditional
-// json format, not always the same parent/children
-func (qd *QueryDsl) MarshalJSON() ([]byte, error) {
-	q := qd.QueryEmbed
-	hasQuery := false
-	if q.Qs != nil || len(q.Terms) > 0 || q.MatchAll != nil || q.MultiMatch != nil {
-		hasQuery = true
-	}
-	// If a query has a
-	if qd.FilterVal != nil && hasQuery {
-		queryB, err := json.Marshal(q)
-		if err != nil {
-			return queryB, err
-		}
-		filterB, err := json.Marshal(qd.FilterVal)
-		if err != nil {
-			return filterB, err
-		}
-		return []byte(fmt.Sprintf(`{"filtered":{"query":%s,"filter":%s}}`, queryB, filterB)), nil
-	}
-	return json.Marshal(q)
-}
-
-// get all
-func (q *QueryDsl) All() *QueryDsl {
-	q.MatchAll = &MatchAll{""}
-	return q
-}
-
-// Range adds a RANGE FilterOp to the search query
-// Legacy. Use the Filter() function instead
-func (q *QueryDsl) Range(fop *FilterOp) *QueryDsl {
-	if q.FilterVal == nil {
-		q.FilterVal = fop
-		return q
-	}
-
-	return q
-}
-
-// Add a term search for a specific field
-// Term("user","kimchy")
-func (q *QueryDsl) Term(name, value string) *QueryDsl {
-	if len(q.Terms) == 0 {
-		q.Terms = make(map[string]string)
-	}
-	q.Terms[name] = value
-	return q
-}
-
-// FunctionScore sets functions to use to score the documents.
-// http://www.elastic.co/guide/en/elasticsearch/reference/1.x/query-dsl-function-score-query.html -func (q *QueryDsl) FunctionScore(mode string, functions ...map[string]interface{}) *QueryDsl { - q.QueryEmbed.FunctionScore = map[string]interface{}{ - "functions": functions, - "score_mode": mode, - } - return q -} - -// The raw search strings (lucene valid) -func (q *QueryDsl) Search(searchFor string) *QueryDsl { - //I don't think this is right, it is not a filter.query, it should be q query? - qs := NewQueryString("", "") - q.QueryEmbed.Qs = &qs - q.QueryEmbed.Qs.Query = searchFor - return q -} - -// Querystring operations -func (q *QueryDsl) Qs(qs *QueryString) *QueryDsl { - q.QueryEmbed.Qs = qs - return q -} - -// SetLenient sets whether the query should ignore format based failures, -// such as passing in text to a number field. -func (q *QueryDsl) SetLenient(lenient bool) *QueryDsl { - q.QueryEmbed.Qs.Lenient = lenient - return q -} - -// Fields in query_string search -// Fields("fieldname","search_for","","") -// -// Fields("fieldname,field2,field3","search_for","","") -// -// Fields("fieldname,field2,field3","search_for","field_exists","") -func (q *QueryDsl) Fields(fields, search, exists, missing string) *QueryDsl { - fieldList := strings.Split(fields, ",") - qs := NewQueryString("", "") - q.QueryEmbed.Qs = &qs - q.QueryEmbed.Qs.Query = search - if len(fieldList) == 1 { - q.QueryEmbed.Qs.DefaultField = fields - } else { - q.QueryEmbed.Qs.Fields = fieldList - } - q.QueryEmbed.Qs.Exists = exists - q.QueryEmbed.Qs.Missing = missing - return q -} - -// Filter this query -func (q *QueryDsl) Filter(f *FilterOp) *QueryDsl { - q.FilterVal = f - return q -} - -// MultiMatch allows searching against multiple fields. -func (q *QueryDsl) MultiMatch(s string, fields []string) *QueryDsl { - q.QueryEmbed.MultiMatch = &MultiMatch{Query: s, Fields: fields} - return q -} - -type MultiMatch struct { - Query string `json:"query"` - Fields []string `json:"fields"` -} - -type MatchAll struct { - All string `json:"-"` -} - -// should we reuse QueryDsl here? -type QueryWrap struct { - Qs QueryString `json:"query_string,omitempty"` -} - -// QueryString based search -func NewQueryString(field, query string) QueryString { - return QueryString{"", field, query, "", "", nil, false} -} - -type QueryString struct { - DefaultOperator string `json:"default_operator,omitempty"` - DefaultField string `json:"default_field,omitempty"` - Query string `json:"query,omitempty"` - Exists string `json:"_exists_,omitempty"` - Missing string `json:"_missing_,omitempty"` - Fields []string `json:"fields,omitempty"` - Lenient bool `json:"lenient,omitempty"` - //_exists_:field1, - //_missing_:field1, -} - -//I don't know how any of the Term stuff below is supposed to work. -mikeyoon - -// Generic Term based (used in query, facet, filter) -type Term struct { - Terms Terms `json:"terms,omitempty"` - FilterVal *FilterWrap `json:"facet_filter,omitempty"` -} - -type Terms struct { - Fields []string `json:"field,omitempty"` - Size string `json:"size,omitempty"` - Regex string `json:"regex,omitempty"` -} - -func NewTerm(fields ...string) *Term { - m := &Term{Terms{Fields: fields}, nil} - return m -} - -func (s *Term) Filter(fl ...interface{}) *Term { - if s.FilterVal == nil { - s.FilterVal = NewFilterWrap() - } - - s.FilterVal.addFilters(fl) - return s -} - -// Custom marshalling -func (t *Terms) MarshalJSON() ([]byte, error) { - m := make(map[string]interface{}) - // TODO: this isn't getting called!? 
- if len(t.Fields) == 1 { - m["field"] = t.Fields[0] - } else if len(t.Fields) > 1 { - m["fields"] = t.Fields - } - if len(t.Regex) > 0 { - m["regex"] = t.Regex - } - if len(t.Size) > 0 { - m["size"] = t.Size - } - return json.Marshal(m) -} diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchreadme b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchreadme deleted file mode 100644 index 2d2d55582..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchreadme +++ /dev/null @@ -1,4 +0,0 @@ - - -To run tests on this, you must first have run/imported data inside of *core* - diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch.go deleted file mode 100644 index c921ae5a3..000000000 --- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2013 Matthew Baird -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package elastigo - -import ( - "encoding/json" - "fmt" - u "github.com/araddon/gou" - "strconv" - "strings" -) - -var ( - _ = u.DEBUG -) - -// Search is the entry point to the SearchDsl, it is a chainable set of utilities -// to create searches. 
-//
-// params
-//    @index = elasticsearch index to search
-//
-//    out, err := Search("github").Type("Issues").Pretty().Query(
-//        Query().Range(
-//            Range().Field("created_at").From("2012-12-10T15:00:00-08:00").To("2012-12-10T15:10:00-08:00"),
-//        ).Search("add"),
-//    ).Result()
-func Search(index string) *SearchDsl {
-	return &SearchDsl{Index: index, args: map[string]interface{}{}}
-}
-
-type SearchDsl struct {
-	args          map[string]interface{}
-	types         []string
-	FromVal       int                      `json:"from,omitempty"`
-	SizeVal       int                      `json:"size,omitempty"`
-	Index         string                   `json:"-"`
-	FacetVal      *FacetDsl                `json:"facets,omitempty"`
-	QueryVal      *QueryDsl                `json:"query,omitempty"`
-	SortBody      []*SortDsl               `json:"sort,omitempty"`
-	FilterVal     *FilterOp                `json:"filter,omitempty"`
-	AggregatesVal map[string]*AggregateDsl `json:"aggregations,omitempty"`
-	HighlightVal  *HighlightDsl            `json:"highlight,omitempty"`
-}
-
-func (s *SearchDsl) Bytes(conn *Conn) ([]byte, error) {
-	return conn.DoCommand("POST", s.url(), s.args, s)
-}
-
-func (s *SearchDsl) Result(conn *Conn) (*SearchResult, error) {
-	var retval SearchResult
-	body, err := s.Bytes(conn)
-	retval.RawJSON = body
-	if err != nil {
-		u.Errorf("%v", err)
-		return nil, err
-	}
-	jsonErr := json.Unmarshal(body, &retval)
-	if jsonErr != nil {
-		u.Errorf("%v \n\t%s", jsonErr, string(body))
-	}
-	return &retval, jsonErr
-}
-
-func (s *SearchDsl) url() string {
-	url := fmt.Sprintf("/%s%s/_search", s.Index, s.getType())
-	return url
-}
-
-func (s *SearchDsl) Pretty() *SearchDsl {
-	s.args["pretty"] = "1"
-	return s
-}
-
-// Type is the elasticsearch *Type* within a specific index
-func (s *SearchDsl) Type(indexType string) *SearchDsl {
-	if len(s.types) == 0 {
-		s.types = make([]string, 0)
-	}
-	s.types = append(s.types, indexType)
-	return s
-}
-
-func (s *SearchDsl) getType() string {
-	if len(s.types) > 0 {
-		return "/" + strings.Join(s.types, ",")
-	}
-	return ""
-}
-
-func (s *SearchDsl) From(from string) *SearchDsl {
-	s.args["from"] = from
-	return s
-}
-
-// Search is a simple interface to search, doesn't have the power of query
-// but uses a simple query_string search
-func (s *SearchDsl) Search(srch string) *SearchDsl {
-	s.QueryVal = Query().Search(srch)
-	return s
-}
-
-func (s *SearchDsl) Size(size string) *SearchDsl {
-	s.args["size"] = size
-	return s
-}
-
-func (s *SearchDsl) Fields(fields ...string) *SearchDsl {
-	s.args["fields"] = strings.Join(fields, ",")
-	return s
-}
-
-func (s *SearchDsl) Source(returnSource bool) *SearchDsl {
-	s.args["_source"] = strconv.FormatBool(returnSource)
-	return s
-}
-
-// Facet passes a Query expression to this search
-//
-//    qry := Search("github").Size("0").Facet(
-//        Facet().Regex("repository.name", "no.*").Size("8"),
-//    )
-//
-//    qry := Search("github").Pretty().Facet(
-//        Facet().Fields("type").Size("25"),
-//    )
-func (s *SearchDsl) Facet(f *FacetDsl) *SearchDsl {
-	s.FacetVal = f
-	return s
-}
-
-func (s *SearchDsl) Aggregates(aggs ...*AggregateDsl) *SearchDsl {
-	if len(aggs) < 1 {
-		return s
-	}
-	if len(s.AggregatesVal) == 0 {
-		s.AggregatesVal = make(map[string]*AggregateDsl)
-	}
-
-	for _, agg := range aggs {
-		s.AggregatesVal[agg.Name] = agg
-	}
-	return s
-}
-
-func (s *SearchDsl) Query(q *QueryDsl) *SearchDsl {
-	s.QueryVal = q
-	return s
-}
-
-// Filter adds a Filter Clause with optional Boolean Clause. This accepts n number of
-// filter clauses. If more than one, and missing Boolean Clause it assumes "and"
-//
-//    qry := Search("github").Filter(
-//        Filter().Exists("repository.name"),
-//    )
-//
-//    qry := Search("github").Filter(
-//        "or",
-//        Filter().Exists("repository.name"),
-//        Filter().Terms("actor_attributes.location", "portland"),
-//    )
-//
-//    qry := Search("github").Filter(
-//        Filter().Exists("repository.name"),
-//        Filter().Terms("repository.has_wiki", true)
-//    )
-
-func (s *SearchDsl) Filter(fl *FilterOp) *SearchDsl {
-	s.FilterVal = fl
-	return s
-}
-
-func (s *SearchDsl) Sort(sort ...*SortDsl) *SearchDsl {
-	if s.SortBody == nil {
-		s.SortBody = make([]*SortDsl, 0)
-	}
-	s.SortBody = append(s.SortBody, sort...)
-	return s
-}
-
-func (s *SearchDsl) Scroll(duration string) *SearchDsl {
-	s.args["scroll"] = duration
-	return s
-}
-
-func (s *SearchDsl) SearchType(searchType string) *SearchDsl {
-	s.args["search_type"] = searchType
-	return s
-}
-
-func (s *SearchDsl) Highlight(highlight *HighlightDsl) *SearchDsl {
-	s.HighlightVal = highlight
-	return s
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch_test.go
deleted file mode 100644
index 81f11b3a3..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsearch_test.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"github.com/araddon/gou"
-	. "github.com/smartystreets/goconvey/convey"
-	"testing"
-)
-
-func TestSearch(t *testing.T) {
-
-	c := NewTestConn()
-	PopulateTestDB(t, c)
-	defer TearDownTestDB(c)
-
-	Convey("Wildcard request query", t, func() {
-
-		qry := map[string]interface{}{
-			"query": map[string]interface{}{
-				"wildcard": map[string]string{"name": "*hu*"},
-			},
-		}
-		out, err := c.Search("oilers", "", nil, qry)
-
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits, ShouldNotBeNil)
-		So(out.Hits.Total, ShouldEqual, 3)
-	})
-
-	Convey("Simple search", t, func() {
-
-		// searching without faceting
-		qry := Search("oilers").Pretty().Query(
-			Query().Search("dave"),
-		)
-
-		// how many different docs used the word "dave"
-		out, err := qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits, ShouldNotBeNil)
-		So(out.Hits.Total, ShouldEqual, 2)
-
-		out, _ = Search("oilers").Search("dave").Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits, ShouldNotBeNil)
-		So(out.Hits.Total, ShouldEqual, 2)
-	})
-
-	Convey("URL Request query string", t, func() {
-
-		out, err := c.SearchUri("oilers", "", map[string]interface{}{"q": "pos:LW"})
-
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits, ShouldNotBeNil)
-		So(out.Hits.Total, ShouldEqual, 3)
-	})
-
-	// A faceted search for what "type" of events there are
-	// - since we are not specifying an elasticsearch type it searches all ()
-	//
-	//	{
-	//	    "terms" : {
-	//	        "_type" : "terms",
-	//	        "missing" : 0,
-	//	        "total" : 7561,
-	//	        "other" : 0,
-	//	        "terms" : [ {
-	//	            "term" : "pushevent",
-	//	            "count" : 4185
-	//	        }, {
-	//	            "term" : "createevent",
-	//	            "count" : 786
-	//	        }.....]
-	//	    }
-	//	}
-
-	Convey("Facet search simple", t, func() {
-
-		qry := Search("oilers").Pretty().Facet(
-			Facet().Fields("teams").Size("4"),
-		).Query(
-			Query().All(),
-		).Size("1")
-		out, err := qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-
-		h := gou.NewJsonHelper(out.Facets)
-		So(h.Int("teams.total"), ShouldEqual, 37)
-		So(h.Int("teams.missing"), ShouldEqual, 0)
-		So(len(h.List("teams.terms")), ShouldEqual, 4)
-
-		// change the size
-		qry.FacetVal.Size("20")
-		out, err = qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-
-		h = gou.NewJsonHelper(out.Facets)
-		So(h.Int("teams.total"), ShouldEqual, 37)
-		So(len(h.List("teams.terms")), ShouldEqual, 11)
-
-	})
-
-	Convey("Facet search with type", t, func() {
-
-		out, err := Search("oilers").Type("heyday").Pretty().Facet(
-			Facet().Fields("teams").Size("4"),
-		).Query(
-			Query().All(),
-		).Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-
-		h := gou.NewJsonHelper(out.Facets)
-		So(h.Int("teams.total"), ShouldEqual, 37)
-		So(len(h.List("teams.terms")), ShouldEqual, 4)
-	})
-
-	Convey("Facet search with wildcard", t, func() {
-
-		qry := Search("oilers").Pretty().Facet(
-			Facet().Fields("teams").Size("20"),
-		).Query(
-			Query().Search("*w*"),
-		)
-		out, err := qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-
-		h := gou.NewJsonHelper(out.Facets)
-		So(h.Int("teams.total"), ShouldEqual, 20)
-		So(len(h.List("teams.terms")), ShouldEqual, 7)
-	})
-
-	Convey("Facet search with range", t, func() {
-
-		qry := Search("oilers").Pretty().Facet(
-			Facet().Fields("teams").Size("20"),
-		).Query(
-			Query().Range(
-				Filter().Range("dob", "19600101", nil, "19621231", nil, ""),
-			).Search("*w*"),
-		)
-		out, err := qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-
-		h := gou.NewJsonHelper(out.Facets)
-		So(h.Int("teams.total"), ShouldEqual, 12)
-		So(len(h.List("teams.terms")), ShouldEqual, 5)
-	})
-
-	Convey("Search query with terms", t, func() {
-
-		qry := Search("oilers").Query(
-			Query().Term("teams", "NYR"),
-		)
-		out, err := qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 4)
-		So(out.Hits.Total, ShouldEqual, 4)
-	})
-
-	Convey("Search query with fields", t, func() {
-
-		qry := Search("oilers").Query(
-			Query().Fields("teams", "NYR", "", ""),
-		)
-		out, err := qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 4)
-		So(out.Hits.Total, ShouldEqual, 4)
-	})
-
-	Convey("Search query with fields exist and missing", t, func() {
-
-		qry := Search("oilers").Filter(
-			Filter().Exists("PIM"),
-		)
-		out, err := qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 2)
-		So(out.Hits.Total, ShouldEqual, 2)
-
-		qry = Search("oilers").Filter(
-			Filter().Missing("PIM"),
-		)
-		out, err = qry.Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 10)
-		So(out.Hits.Total, ShouldEqual, 12)
-	})
-
-	Convey("Search with query and filter", t, func() {
-
-		out, err := Search("oilers").Size("25").Query(
-			Query().Fields("name", "*d*", "", ""),
-		).Filter(
-			Filter().Terms("teams", TEMDefault, "STL"),
-		).Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 2)
-		So(out.Hits.Total, ShouldEqual, 2)
-	})
-
-	Convey("Search with range", t, func() {
-
-		out, err := Search("oilers").Size("25").Query(
-			Query().Range(
-				Filter().Range("dob", "19600101", nil, "19621231", nil, ""),
-			).Search("*w*"),
-		).Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 4)
-		So(out.Hits.Total, ShouldEqual, 4)
-	})
-
-	Convey("Search with sorting desc", t, func() {
-
-		out, err := Search("oilers").Pretty().Query(
-			Query().All(),
-		).Sort(
-			Sort("dob").Desc(),
-		).Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 10)
-		So(out.Hits.Total, ShouldEqual, 14)
-
-		b, err := out.Hits.Hits[0].Source.MarshalJSON()
-		h1 := gou.NewJsonHelper(b)
-		So(h1.String("name"), ShouldEqual, "Grant Fuhr")
-	})
-
-	Convey("Search with sorting asc", t, func() {
-
-		out, err := Search("oilers").Pretty().Query(
-			Query().All(),
-		).Sort(
-			Sort("dob"),
-		).Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 10)
-		So(out.Hits.Total, ShouldEqual, 14)
-
-		b, err := out.Hits.Hits[0].Source.MarshalJSON()
-		h1 := gou.NewJsonHelper(b)
-		So(h1.String("name"), ShouldEqual, "Pat Hughes")
-	})
-
-	Convey("Search with sorting desc with query", t, func() {
-
-		out, err := Search("oilers").Pretty().Query(
-			Query().Search("*w*"),
-		).Sort(
-			Sort("dob").Desc(),
-		).Result(c)
-		So(err, ShouldBeNil)
-		So(out, ShouldNotBeNil)
-		So(out.Hits.Len(), ShouldEqual, 8)
-		So(out.Hits.Total, ShouldEqual, 8)
-
-		b, err := out.Hits.Hits[0].Source.MarshalJSON()
-		h1 := gou.NewJsonHelper(b)
-		So(h1.String("name"), ShouldEqual, "Wayne Gretzky")
-	})
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsort.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsort.go
deleted file mode 100644
index a8359173f..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/searchsort.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-// SortDsl accepts any number of Sort commands
-//
-//    Query().Sort(
-//        Sort("last_name").Desc(),
-//        Sort("age"),
-//    )
-func Sort(field string) *SortDsl {
-	return &SortDsl{Name: field}
-}
-
-type SortBody []interface{}
-type SortDsl struct {
-	Name   string
-	IsDesc bool
-}
-
-func (s *SortDsl) Desc() *SortDsl {
-	s.IsDesc = true
-	return s
-}
-func (s *SortDsl) Asc() *SortDsl {
-	s.IsDesc = false
-	return s
-}
-
-func (s *SortDsl) MarshalJSON() ([]byte, error) {
-	if s.IsDesc {
-		return json.Marshal(map[string]string{s.Name: "desc"})
-	}
-	if s.Name == "_score" {
-		return []byte(`"_score"`), nil
-	}
-	return []byte(fmt.Sprintf(`"%s"`, s.Name)), nil // "user" assuming default = asc?
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/setup_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/setup_test.go
deleted file mode 100644
index 026f2dc54..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/setup_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package elastigo
-
-import (
-	"testing"
-	"time"
-)
-
-/*
-// elastigo Conn adapter to avoid a circular dependency
-type conn interface {
-	CreateIndex(name string) (interface{}, error)
-	DeleteIndex(name string) (interface{}, error)
-
-	Index(index string, _type string, id string, args map[string]interface{}, data interface{}) (interface{}, error)
-}
-*/
-
-func newIndexWorker(c *Conn, t *testing.T) func(interface{}) {
-
-	return func(d interface{}) {
-		_, err := c.Index("oilers", "heyday", "", nil, d)
-		if err != nil {
-			t.Fatalf("Index failed: %s", err)
-		}
-	}
-}
-
-func PopulateTestDB(t *testing.T, c *Conn) {
-
-	// it is not technically necessary to create an index here
-	_, err := c.CreateIndex("oilers")
-	if err != nil {
-		t.Fatal("Error in CreateIndex", err)
-	}
-
-	// set the mapping for dob to be a date so it can be used for range searches
-	_, err = c.DoCommand("PUT", "/oilers/heyday/_mapping?ignore_conflicts", nil,
-		string(`{"heyday": {"properties": {
-			"dob": {"type": "date", "format": "basic_date"},
-			"pos": {"type": "string", "index": "not_analyzed"},
-			"teams": {"type": "string", "index": "not_analyzed"}
-		}}}`))
-	if err != nil {
-		t.Fatal("Error setting dob mapping", err)
-	}
-
-	idx := newIndexWorker(c, t)
-
-	idx(`{"name": "Mark Messier", "jersey": 11, "pos": "LW", "goals": 37, "PIM": 165,
-		"dob": "19610118", "teams": ["EDM", "NYR", "VAN"]}`)
-	idx(`{"name": "Wayne Gretzky", "jersey": 99, "pos": "C", "goals": 87,
-		"dob": "19610126", "teams": ["EDM", "NYR", "STL"]}`)
-	idx(`{"name": "Paul Coffey", "jersey": 7, "pos": "D", "goals": 40,
-		"dob": "19610601", "teams": ["EDM", "DET"]}`)
-	idx(`{"name": "Jari Kurri", "jersey": 17, "pos": "RW", "goals": 52,
-		"dob": "19600518", "teams": ["EDM", "VAN"]}`)
-	idx(`{"name": "Glenn Anderson", "jersey": 9, "pos": "RW", "goals": 54,
-		"dob": "19601002", "teams": ["EDM", "NYR", "TOR", "STL"]}`)
-	idx(`{"name": "Ken Linseman", "jersey": 13, "pos": "C", "goals": 18,
-		"dob": "19580811", "teams": ["EDM", "TOR"]}`)
-	idx(`{"name": "Pat Hughes", "jersey": 16, "pos": "RW", "goals": 27,
-		"dob": "19550325", "teams": ["EDM", "MTL", "PIT"]}`)
-	idx(`{"name": "Dave Hunter", "jersey": 12, "pos": "LW", "goals": 22,
-		"dob": "19580101", "teams": ["EDM", "PIT"]}`)
-	idx(`{"name": "Kevin Lowe", "jersey": 4, "pos": "D", "goals": 4,
-		"dob": "19590415", "teams": ["EDM", "NYR"]}`)
-	idx(`{"name": "Charlie Huddy", "jersey": 22, "pos": "D", "goals": 8,
-		"dob": "19590602", "teams": ["EDM", "BUF", "STL"]}`)
-	idx(`{"name": "Randy Gregg", "jersey": 21, "pos": "D", "goals": 13,
-		"dob": "19560219", "teams": ["EDM", "VAN"]}`)
-	idx(`{"name": "Dave Semenko", "jersey": 27, "pos": "LW", "goals": 4, "PIM": 118,
-		"dob": "19570712", "teams": ["EDM"]}`)
-	idx(`{"name": "Grant Fuhr", "jersey": 31, "pos": "G", "GAA": 3.91,
-		"dob": "19620928", "teams": ["EDM", "TOR", "BUF", "STL"]}`)
-	idx(`{"name": "Andy Moog", "jersey": 35, "pos": "G", "GAA": 3.77,
-		"dob": "19600218", "teams": ["EDM", "BOS", "DAL", "MTL"]}`)
-
-	// HACK to let the ES magic happen
-	time.Sleep(time.Second)
-}
-
-func TearDownTestDB(c *Conn) {
-	c.DeleteIndex("oilers")
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared.go
deleted file mode 100644
index 697a8adf2..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-type OneTermQuery struct {
-	Query struct {
-		Term string `json:"term"`
-	} `json:"query"`
-}
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared_test.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared_test.go
deleted file mode 100644
index 2a3d8de41..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/shared_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2013 Matthew Baird
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"flag"
-	"log"
-	"encoding/json"
-)
-
-var (
-	_ = log.Ldate
-	eshost   *string = flag.String("host", "localhost", "Elasticsearch Server Host Address")
-	logLevel *string = flag.String("logging", "info", "Which log level: [debug,info,warn,error,fatal]")
-)
-
-func GetJson(input interface{}) (map[string]interface{}, error) {
-	var result map[string]interface{}
-	bytes, err := json.Marshal(input)
-
-	if err == nil {
-		err = json.Unmarshal(bytes, &result)
-	}
-
-	return result, err
-}
-
-func HasKey(input map[string]interface{}, key string) bool {
-	if _, ok := input[key]; ok {
-		return true
-	}
-
-	return false
-}
\ No newline at end of file
diff --git a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/snapshot.go b/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/snapshot.go
deleted file mode 100644
index 5b106d193..000000000
--- a/services/templeton/vendor/src/github.com/mattbaird/elastigo/lib/snapshot.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2015 Niels Freier
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package elastigo
-
-import (
-	"encoding/json"
-	"fmt"
-	"time"
-)
-
-type GetSnapshotsResponse struct {
-	Snapshots []struct {
-		Snapshot  string    `json:"snapshot"`
-		Indices   []string  `json:"indices"`
-		State     string    `json:"state"`
-		StartTime time.Time `json:"start_time"`
-		EndTime   time.Time `json:"end_time"`
-	} `json:"snapshots"`
-}
-
-// CreateSnapshotRepository creates a new snapshot repository on the cluster
-// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
-func (c *Conn) CreateSnapshotRepository(name string, args map[string]interface{}, settings interface{}) (BaseResponse, error) {
-	var url string
-	var retval BaseResponse
-	url = fmt.Sprintf("/_snapshot/%s", name)
-	body, err := c.DoCommand("POST", url, args, settings)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-
-	return retval, nil
-
-}
-
-// TakeSnapshot takes a snapshot of the current state of the cluster with a specific name and for a existing repositoriy
-// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
-func (c *Conn) TakeSnapshot(repository, name string, args map[string]interface{}, query interface{}) (BaseResponse, error) {
-	var url string
-	var retval BaseResponse
-	url = fmt.Sprintf("/_snapshot/%s/%s", repository, name)
-	body, err := c.DoCommand("PUT", url, args, query)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-
-	return retval, nil
-}
-
-// RestoreSnapshot restores a snapshot of the current state of the cluster with a specific name and for a existing repositoriy
-// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
-func (c *Conn) RestoreSnapshot(repository, name string, args map[string]interface{}, query interface{}) (BaseResponse, error) {
-	var url string
-	var retval BaseResponse
-	url = fmt.Sprintf("/_snapshot/%s/%s/_restore", repository, name)
-	body, err := c.DoCommand("POST", url, args, query)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		fmt.Println(string(body))
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-
-	return retval, nil
-}
-
-// GetSnapshots returns all snapshot of the specified name for a specific repository
-// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
-func (c *Conn) GetSnapshotByName(repository, name string, args map[string]interface{}) (GetSnapshotsResponse, error) {
-	return c.getSnapshots(repository, name, args)
-}
-
-// GetSnapshots returns all snapshot for a specific repository
-// http://www.elastic.co/guide/en/elasticsearch/reference/1.3/modules-snapshots.html
-func (c *Conn) GetSnapshots(repository string, args map[string]interface{}) (GetSnapshotsResponse, error) {
-	return c.getSnapshots(repository, "_all", args)
-}
-
-func (c *Conn) getSnapshots(repository, name string, args map[string]interface{}) (GetSnapshotsResponse, error) {
-	var url string
-	var retval GetSnapshotsResponse
-	url = fmt.Sprintf("/_snapshot/%s/%s", repository, name)
-	body, err := c.DoCommand("GET", url, args, nil)
-	if err != nil {
-		return retval, err
-	}
-	if err == nil {
-		jsonErr := json.Unmarshal(body, &retval)
-		if jsonErr != nil {
-			return retval, jsonErr
-		}
-	}
-
-	return retval, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/CHANGELOG-3.0.md b/services/templeton/vendor/src/github.com/olivere/elastic/CHANGELOG-3.0.md
new file mode 100644
index 000000000..07f3e66bf
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/CHANGELOG-3.0.md
@@ -0,0 +1,363 @@
+# Elastic 3.0
+
+Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes.
+
+We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft.
+
+So, to summarize:
+
+1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained.
+2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch.
+
+The rest of the document is a list of all changes in Elastic 3.0.
+
+## Pointer types
+
+All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example:
+
+Example for Elastic 2.0 (old):
+
+```go
+q := elastic.NewMatchAllQuery()
+res, err := elastic.Search("one").Query(&q).Do() // notice the & here
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewMatchAllQuery()
+res, err := elastic.Search("one").Query(q).Do() // no more &
+// ... which can be simplified as:
+res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do()
+```
+
+It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046).
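+
+(A hedged aside, not part of the upstream changelog: the linked issue is the classic "address of a loop variable" bug that value types invited. The sketch below reproduces the failure mode in plain Go with a hypothetical `query` value type; the shadow copy is the fix.)
+
+```go
+package main
+
+import "fmt"
+
+// query stands in for an Elastic 2.0-style value type (illustration only).
+type query struct{ term string }
+
+func main() {
+	qs := []query{{"a"}, {"b"}, {"c"}}
+	var ptrs []*query
+	for _, q := range qs {
+		q := q // without this shadow copy, &q would alias one reused loop variable
+		ptrs = append(ptrs, &q)
+	}
+	for _, p := range ptrs {
+		fmt.Println(p.term) // prints a, b, c; without the shadow it would print c, c, c
+	}
+}
+```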
+ +## Query/filter merge + +One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`). + +The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay! + +Notice that some methods still come by "filter", e.g. `PostFilter`. However, they accept a `Query` now when they used to accept a `Filter` before. + +Example for Elastic 2.0 (old): + +```go +q := elastic.NewMatchAllQuery() +f := elastic.NewTermFilter("tag", "important") +res, err := elastic.Search().Index("one").Query(&q).PostFilter(f) +``` + +Example for Elastic 3.0 (new): + +```go +q := elastic.NewMatchAllQuery() +f := elastic.NewTermQuery("tag", "important") // it's a query now! +res, err := elastic.Search().Index("one").Query(q).PostFilter(f) +``` + +## Facets are removed + +[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now. + +## Errors + +Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer. + +Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59). + +### HTTP Status 404 (Not Found) + +When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0. + +Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error. + +To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below). + +The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0. + +Example for Elastic 2.0 (old): + +```go +res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() +if err != nil { + // Something else went wrong (but 404 is NOT an error in Elastic 2.0) +} +if !res.Found { + // Document has not been found +} +``` + +Example for Elastic 3.0 (new): + +```go +res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() +if err != nil { + if elastic.IsNotFound(err) { + // Document has not been found + } else { + // Something else went wrong + } +} +``` + +### HTTP Status 408 (Timeouts) + +Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API. 
+
+To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper.
+
+Example for Elastic 2.0 (old):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if err != nil {
+	// ...
+}
+if health.TimedOut {
+	// We have a timeout
+}
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if elastic.IsTimeout(err) {
+	// We have a timeout
+}
+```
+
+### Bulk Errors
+
+The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
+In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
+These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
+
+### Removed specific Elastic errors
+
+The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
+
+## Numeric types
+
+Elastic 3.0 has settled on `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
+
+## Pluralization
+
+Some services accept zero, one or more indices or types to operate on.
+E.g. the `SearchService` accepts a list of zero, one, or more indices to
+search and therefore had a func called `Index(index string)` and a func
+called `Indices(indices ...string)`.
+
+Elastic 3.0 now only uses the singular form that, when applicable, accepts a
+variadic type. E.g. in the case of the `SearchService`, you now only have
+one func with the following signature: `Index(indices ...string)`.
+
+Notice this is only limited to `Index(...)` and `Type(...)`. There are other
+services with variadic functions. These have not been changed.
+
+## Multiple calls to variadic functions
+
+Some services with variadic functions cleared the underlying slice when
+called, while other services just added to the existing slice. This has now been
+normalized to always add to the underlying slice.
+
+Example for Elastic 2.0 (old):
+
+```go
+// Would clear only scroll id "two",
+// because ScrollId cleared the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+// Now (correctly) clears both scroll id "one" and "two",
+// because ScrollId no longer clears the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+## Ping service requires URL
+
+The `Ping` service raised some issues because it is different from all
+other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`.
+
+Users expected to ping the cluster, but that is not possible as the cluster
+can be a set of many nodes: so which node do we ping then?
+
+To make it clearer, the `Ping` function on the client now requires users
+to explicitly set the URL of the node to ping.
+
+## Meta fields
+
+Many of the meta fields, e.g. `_parent` or `_routing`, are now
+[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
+and are no longer returned as parts of the `fields` object. We had to change
+larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
+
+Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
+
+## HasParentQuery / HasChildQuery
+
+`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API.
+
+Example for Elastic 2.0 (old):
+
+```go
+allQ := elastic.NewMatchAllQuery()
+q := elastic.NewHasChildFilter("tweet").Query(&allQ)
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
+```
+
+## SetBasicAuth client option
+
+You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
+
+Example:
+
+```go
+client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+## Delete-by-Query API
+
+The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer a core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404.
+
+An older version of this document stated the following:
+
+> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
+>
+> Example for Elastic 3.0 (new):
+>
+> ```go
+> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do()
+> if err == elastic.ErrPluginNotFound {
+> 	// Delete By Query API is not available
+> }
+> ```
+
+I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch.
+
+If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play.
+
+## HasPlugin and SetRequiredPlugins
+
+Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client.
+ +Example for Elastic 3.0 (new): + +```go +err, found := client.HasPlugin("delete-by-query") +if err == nil && found { + // ... Delete By Query API is available +} +``` + +To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client wouldn't be created in the first place. + +```go +// Will raise an error if the "delete-by-query" plugin is NOT installed +client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query")) +if err != nil { + log.Fatal(err) +} +``` + +Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file. + +## Common Query has been renamed to Common Terms Query + +The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring). + +## Remove `MoreLikeThis` and `MoreLikeThisField` + +The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`. + +## Remove Filtered Query + +With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite many of your application code, it might be a good chance to get rid of things that are deprecated as well. So you might simply change your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). + +## Remove FuzzyLikeThis and FuzzyLikeThisField + +Both have been removed from Elasticsearch 2.0 as well. + +## Remove LimitFilter + +The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects. + +## Remove `_cache` and `_cache_key` from filters + +Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching). + +## Partial fields are gone + +Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html). + +## Scripting + +A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. 
aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type. + +Example for Elastic 2.0 (old): + +```go +update, err := client.Update().Index("twitter").Type("tweet").Id("1"). + Script("ctx._source.retweets += num"). + ScriptParams(map[string]interface{}{"num": 1}). + Upsert(map[string]interface{}{"retweets": 0}). + Do() +``` + +Example for Elastic 3.0 (new): + +```go +update, err := client.Update().Index("twitter").Type("tweet").Id("1"). + Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)). + Upsert(map[string]interface{}{"retweets": 0}). + Do() +``` + +## Cluster State + +The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`. + +## Unexported structs in response + +Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0 however, we changed that (most) sub-structs are now unexported, meaning: You can only pass around the whole response, not sub-structures of it. This makes it easier for restructuring responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example. + +## Add offset to Histogram aggregation + +Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option. + +## Services + +### REST API specification + +As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure. + +Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process. + +This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes. + +At the same time, the file names of the services are renamed to match the REST API specification naming. + +### REST API Test Suite + +The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well. + +This process in not completed though. + + diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTING.md b/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTING.md new file mode 100644 index 000000000..4fbc79dd0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# How to contribute + +Elastic is an open-source project and we are looking forward to each +contribution. + +Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level +overview of the features of Elasticsearch. However, Elastic tries to resemble +the Java API of Elasticsearch which you can find [on GitHub](https://github.com/elastic/elasticsearch). + +This explains why you might think that some options are strange or missing +in Elastic, while often they're just different. 
Please check the Java API first. + +Having said that: Elasticsearch is moving fast and it might be very likely +that we missed some features or changes. Feel free to change that. + +## Your Pull Request + +To make it easy to review and understand your changes, please keep the +following things in mind before submitting your pull request: + +* You compared the existing implemenation with the Java API, did you? +* Please work on the latest possible state of `olivere/elastic`. + Use `release-branch.v2` for targeting Elasticsearch 1.x and + `release-branch.v3` for targeting 2.x. +* Create a branch dedicated to your change. +* If possible, write a test case which confirms your change. +* Make sure your changes and your tests work with all recent versions of + Elasticsearch. We currently support Elasticsearch 1.7.x in the + release-branch.v2 and Elasticsearch 2.x in the release-branch.v3. +* Test your changes before creating a pull request (`go test ./...`). +* Don't mix several features or bug fixes in one pull request. +* Create a meaningful commit message. +* Explain your change, e.g. provide a link to the issue you are fixing and + probably a link to the Elasticsearch documentation and/or source code. +* Format your source with `go fmt`. + +## Additional Resources + +* [GitHub documentation](http://help.github.com/) +* [GitHub pull request documentation](http://help.github.com/send-pull-requests/) diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTORS b/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTORS new file mode 100644 index 000000000..0743d2d15 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTORS @@ -0,0 +1,35 @@ +# This is a list of people who have contributed code +# to the Elastic repository. +# +# It is just my small "thank you" to all those that helped +# making Elastic what it is. +# +# Please keep this list sorted. + +Adam Alix [@adamalix](https://github.com/adamalix) +Adam Weiner [@adamweiner](https://github.com/adamweiner) +Alexey Sharov [@nizsheanez](https://github.com/nizsheanez) +Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va) +Christophe Courtaut [@kri5](https://github.com/kri5) +Conrad Pankoff [@deoxxa](https://github.com/deoxxa) +Corey Scott [@corsc](https://github.com/corsc) +Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath) +Gerhard Häring [@ghaering](https://github.com/ghaering) +Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos) +Guillaume J. 
Charmes [@creack](https://github.com/creack) +Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy) +Isaac Saldana [@isaldana](https://github.com/isaldana) +Jack Lindamood [@cep21](https://github.com/cep21) +John Goodall [@jgoodall](https://github.com/jgoodall) +Junpei Tsuji [@jun06t](https://github.com/jun06t) +Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh) +Mara Kim [@autochthe](https://github.com/autochthe) +Medhi Bechina [@mdzor](https://github.com/mdzor) +Nicholas Wolff [@nwolff](https://github.com/nwolff) +Orne Brocaar [@brocaar](https://github.com/brocaar) +Sacheendra talluri [@sacheendra](https://github.com/sacheendra) +Sean DuBois [@Sean-Der](https://github.com/Sean-Der) +Shalin LK [@shalinlk](https://github.com/shalinlk) +Sundar [@sundarv85](https://github.com/sundarv85) +Tetsuya Morimoto [@t2y](https://github.com/t2y) +zakthomas [@zakthomas](https://github.com/zakthomas) diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/LICENSE b/services/templeton/vendor/src/github.com/olivere/elastic/LICENSE new file mode 100644 index 000000000..8b22cdb60 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) +Copyright © 2012-2015 Oliver Eilhard + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the “Software”), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/README.md b/services/templeton/vendor/src/github.com/olivere/elastic/README.md new file mode 100644 index 000000000..eefd530df --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/README.md @@ -0,0 +1,415 @@ +# Elastic + +Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the +[Go](http://www.golang.org/) programming language. + +[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v3)](https://travis-ci.org/olivere/elastic) +[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v3) +[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE) + +See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic. + + +## Releases + +**The release branches (e.g. [`release-branch.v3`](https://github.com/olivere/elastic/tree/release-branch.v3)) are actively being worked on and can break at any time. 
If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
+
+Here's the version matrix:
+
+Elasticsearch version | Elastic version | Package URL
+----------------------|-----------------|------------
+2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
+1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
+0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
+
+**Example:**
+
+You have installed Elasticsearch 2.1.1 and want to use Elastic. As listed above, you should use Elastic 3.0. So you first install the stable release of Elastic 3.0 from gopkg.in.
+
+```sh
+$ go get gopkg.in/olivere/elastic.v3
+```
+
+You then import it with this import path:
+
+```go
+import "gopkg.in/olivere/elastic.v3"
+```
+
+### Elastic 3.0
+
+Elastic 3.0 targets Elasticsearch 2.0 and later. Elasticsearch 2.0.0 was [released on 28th October 2015](https://www.elastic.co/blog/elasticsearch-2-0-0-released).
+
+Notice that there are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html) and we used this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md).
+
+### Elastic 2.0
+
+Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
+
+### Elastic 1.0
+
+Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
+to a recent version.
+
+However, if you cannot update for some reason, don't worry. Version 1.0 is
+still available. All you need to do is go-get it and change your import path
+as described above.
+
+
+## Status
+
+We have been using Elastic in production since 2012. Elastic is stable, but
+the API changes now and then. We strive for API compatibility.
+However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
+and we sometimes have to adapt.
+
+Having said that, there have been no big API changes that required you
+to rewrite large parts of your application. More often than not it's renaming APIs
+and adding/removing features so that Elastic stays in sync with Elasticsearch.
+
+Elastic has been used in production with the following Elasticsearch versions:
+0.90, 1.0-1.7. Furthermore, we use [Travis CI](https://travis-ci.org/)
+to test Elastic with the most recent versions of Elasticsearch and Go.
+See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
+file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
+for the results.
+
+Elasticsearch has quite a few features. Most of them are implemented
+by Elastic. I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful.
+
+
+## Getting Started
+
+The first thing you do is create a [Client](https://github.com/olivere/elastic/blob/master/client.go). 
The client connects to Elasticsearch on `http://127.0.0.1:9200` by default. + +You typically create one client for your app. Here's a complete example of +creating a client, creating an index, adding a document, executing a search etc. + +```go +// Create a client +client, err := elastic.NewClient() +if err != nil { + // Handle error +} + +// Create an index +_, err = client.CreateIndex("twitter").Do() +if err != nil { + // Handle error + panic(err) +} + +// Add a document to the index +tweet := Tweet{User: "olivere", Message: "Take Five"} +_, err = client.Index(). + Index("twitter"). + Type("tweet"). + Id("1"). + BodyJson(tweet). + Do() +if err != nil { + // Handle error + panic(err) +} + +// Search with a term query +termQuery := elastic.NewTermQuery("user", "olivere") +searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute +if err != nil { + // Handle error + panic(err) +} + +// searchResult is of type SearchResult and returns hits, suggestions, +// and all kinds of other information from Elasticsearch. +fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + +// Each is a convenience function that iterates over hits in a search result. +// It makes sure you don't need to check for nil values in the response. +// However, it ignores errors in serialization. If you want full control +// over iterating the hits, see below. +var ttyp Tweet +for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { + if t, ok := item.(Tweet); ok { + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } +} +// TotalHits is another convenience function that works even when something goes wrong. +fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) + +// Here's how you iterate through results with full control over each step. +if searchResult.Hits != nil { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } +} else { + // No hits + fmt.Print("Found no tweets\n") +} + +// Delete the index again +_, err = client.DeleteIndex("twitter").Do() +if err != nil { + // Handle error + panic(err) +} +``` + +See the [wiki](https://github.com/olivere/elastic/wiki) for more details. 
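+
+A quick configuration hint before moving on: if Elasticsearch is not reachable
+at `http://127.0.0.1:9200`, you can pass options to `elastic.NewClient`. A
+minimal sketch (the URL below is a placeholder for your own cluster; note that
+`SetSniff(false)` disables node discovery, which can help when your nodes are
+not directly reachable from the client):
+
+```go
+client, err := elastic.NewClient(
+	elastic.SetURL("http://search.example.com:9200"),
+	elastic.SetSniff(false),
+)
+if err != nil {
+	// Handle error
+	panic(err)
+}
+```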
+ + +## API Status + +### Document APIs + +- [x] Index API +- [x] Get API +- [x] Delete API +- [x] Update API +- [x] Multi Get API +- [x] Bulk API +- [x] Delete By Query API +- [x] Term Vectors +- [ ] Multi termvectors API + +### Search APIs + +- [x] Search +- [x] Search Template +- [ ] Search Shards API +- [x] Suggesters + - [x] Term Suggester + - [x] Phrase Suggester + - [x] Completion Suggester + - [x] Context Suggester +- [x] Multi Search API +- [x] Count API +- [ ] Search Exists API +- [ ] Validate API +- [x] Explain API +- [x] Percolator API +- [ ] Field Stats API + +### Aggregations + +- Metrics Aggregations + - [x] Avg + - [x] Cardinality + - [x] Extended Stats + - [x] Geo Bounds + - [x] Max + - [x] Min + - [x] Percentiles + - [x] Percentile Ranks + - [ ] Scripted Metric + - [x] Stats + - [x] Sum + - [x] Top Hits + - [x] Value Count +- Bucket Aggregations + - [x] Children + - [x] Date Histogram + - [x] Date Range + - [x] Filter + - [x] Filters + - [x] Geo Distance + - [ ] GeoHash Grid + - [x] Global + - [x] Histogram + - [x] IPv4 Range + - [x] Missing + - [x] Nested + - [x] Range + - [x] Reverse Nested + - [x] Sampler + - [x] Significant Terms + - [x] Terms +- Pipeline Aggregations + - [x] Avg Bucket + - [x] Derivative + - [x] Max Bucket + - [x] Min Bucket + - [x] Sum Bucket + - [x] Moving Average + - [x] Cumulative Sum + - [x] Bucket Script + - [x] Bucket Selector + - [x] Serial Differencing +- [x] Aggregation Metadata + +### Indices APIs + +- [x] Create Index +- [x] Delete Index +- [x] Get Index +- [x] Indices Exists +- [x] Open / Close Index +- [x] Put Mapping +- [x] Get Mapping +- [ ] Get Field Mapping +- [ ] Types Exists +- [x] Index Aliases +- [x] Update Indices Settings +- [x] Get Settings +- [ ] Analyze +- [x] Index Templates +- [x] Warmers +- [x] Indices Stats +- [ ] Indices Segments +- [ ] Indices Recovery +- [ ] Clear Cache +- [x] Flush +- [x] Refresh +- [x] Optimize +- [ ] Shadow Replica Indices +- [ ] Upgrade + +### cat APIs + +The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line. 
+
+- [ ] cat aliases
+- [ ] cat allocation
+- [ ] cat count
+- [ ] cat fielddata
+- [ ] cat health
+- [ ] cat indices
+- [ ] cat master
+- [ ] cat nodes
+- [ ] cat pending tasks
+- [ ] cat plugins
+- [ ] cat recovery
+- [ ] cat thread pool
+- [ ] cat shards
+- [ ] cat segments
+
+### Cluster APIs
+
+- [x] Cluster Health
+- [x] Cluster State
+- [x] Cluster Stats
+- [ ] Pending Cluster Tasks
+- [ ] Cluster Reroute
+- [ ] Cluster Update Settings
+- [ ] Nodes Stats
+- [x] Nodes Info
+- [ ] Nodes hot_threads
+
+### Query DSL
+
+- [x] Match All Query
+- [x] Inner hits
+- Full text queries
+  - [x] Match Query
+  - [x] Multi Match Query
+  - [x] Common Terms Query
+  - [x] Query String Query
+  - [x] Simple Query String Query
+- Term level queries
+  - [x] Term Query
+  - [x] Terms Query
+  - [x] Range Query
+  - [x] Exists Query
+  - [x] Missing Query
+  - [x] Prefix Query
+  - [x] Wildcard Query
+  - [x] Regexp Query
+  - [x] Fuzzy Query
+  - [x] Type Query
+  - [x] Ids Query
+- Compound queries
+  - [x] Constant Score Query
+  - [x] Bool Query
+  - [x] Dis Max Query
+  - [x] Function Score Query
+  - [x] Boosting Query
+  - [x] Indices Query
+  - [x] And Query (deprecated)
+  - [x] Not Query
+  - [x] Or Query (deprecated)
+  - [ ] Filtered Query (deprecated)
+  - [ ] Limit Query (deprecated)
+- Joining queries
+  - [x] Nested Query
+  - [x] Has Child Query
+  - [x] Has Parent Query
+- Geo queries
+  - [ ] GeoShape Query
+  - [x] Geo Bounding Box Query
+  - [x] Geo Distance Query
+  - [ ] Geo Distance Range Query
+  - [x] Geo Polygon Query
+  - [ ] Geohash Cell Query
+- Specialized queries
+  - [x] More Like This Query
+  - [x] Template Query
+  - [x] Script Query
+- Span queries
+  - [ ] Span Term Query
+  - [ ] Span Multi Term Query
+  - [ ] Span First Query
+  - [ ] Span Near Query
+  - [ ] Span Or Query
+  - [ ] Span Not Query
+  - [ ] Span Containing Query
+  - [ ] Span Within Query
+
+### Modules
+
+- [ ] Snapshot and Restore
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+
+### Scan
+
+Scrolling through documents (e.g. `search_type=scan`) is implemented via
+the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
+
+
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot to the great folks working hard on
+[Elasticsearch](http://www.elasticsearch.org/)
+and
+[Go](http://www.golang.org/).
+
+Elastic uses portions of the
+[uritemplates](https://github.com/jtacoma/uritemplates) library
+by Joshua Tacoma and
+[backoff](https://github.com/cenkalti/backoff) by Cenk Altı.
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/LICENSE b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/LICENSE
new file mode 100644
index 000000000..f6f2dcc97
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/LICENSE
@@ -0,0 +1,22 @@
+Portions of this code rely on this LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff.go b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff.go
new file mode 100644
index 000000000..f6d7ad9a0
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff.go
@@ -0,0 +1,159 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package backoff
+
+import (
+	"math"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Backoff is an interface for different types of backoff algorithms.
+type Backoff interface {
+	Next() time.Duration
+	Reset()
+}
+
+// Stop is used as a signal to indicate that no more retries should be made.
+const Stop time.Duration = -1
+
+// -- Simple Backoff --
+
+// SimpleBackoff takes a list of fixed values for backoff intervals.
+// Each call to Next returns the next value from that fixed list.
+// After each value is returned, subsequent calls to Next will only return
+// the last element. The caller may specify if the values are "jittered".
+type SimpleBackoff struct {
+	sync.Mutex
+	ticks  []int
+	index  int
+	jitter bool
+	stop   bool
+}
+
+// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
+// list of fixed intervals in milliseconds.
+func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
+	return &SimpleBackoff{
+		ticks:  ticks,
+		index:  0,
+		jitter: false,
+		stop:   false,
+	}
+}
+
+// Jitter, when enabled, makes Next return a randomized value in the range
+// [0.5*value .. 1.5*value].
+func (b *SimpleBackoff) Jitter(doJitter bool) *SimpleBackoff {
+	b.Lock()
+	defer b.Unlock()
+	b.jitter = doJitter
+	return b
+}
+
+// SendStop, when enabled, makes Next return Stop once
+// the list of values is exhausted.
+func (b *SimpleBackoff) SendStop(doStop bool) *SimpleBackoff {
+	b.Lock()
+	defer b.Unlock()
+	b.stop = doStop
+	return b
+}
+
+// Next returns the next wait interval.
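+// It advances through the fixed list of ticks and, once the list is
+// exhausted, keeps returning the last tick (or Stop, if SendStop(true)
+// was set).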
+func (b *SimpleBackoff) Next() time.Duration {
+	b.Lock()
+	defer b.Unlock()
+
+	i := b.index
+	if i >= len(b.ticks) {
+		if b.stop {
+			return Stop
+		}
+		i = len(b.ticks) - 1
+		b.index = i
+	} else {
+		b.index++
+	}
+
+	ms := b.ticks[i]
+	if b.jitter {
+		ms = jitter(ms)
+	}
+	return time.Duration(ms) * time.Millisecond
+}
+
+// Reset resets SimpleBackoff.
+func (b *SimpleBackoff) Reset() {
+	b.Lock()
+	b.index = 0
+	b.Unlock()
+}
+
+// jitter randomizes the interval to return a value in the range
+// [0.5*millis .. 1.5*millis].
+func jitter(millis int) int {
+	if millis <= 0 {
+		return 0
+	}
+	return millis/2 + rand.Intn(millis)
+}
+
+// -- Exponential --
+
+// ExponentialBackoff implements the simple exponential backoff described by
+// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
+type ExponentialBackoff struct {
+	sync.Mutex
+	t    float64 // initial timeout (in msec)
+	f    float64 // exponential factor (e.g. 2)
+	m    float64 // maximum timeout (in msec)
+	n    int64   // number of retries
+	stop bool    // indicates whether Next should send "Stop" when the max timeout is reached
+}
+
+// NewExponentialBackoff returns an ExponentialBackoff backoff policy.
+// Use initialTimeout to set the first/minimal interval
+// and maxTimeout to set the maximum wait interval.
+func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
+	return &ExponentialBackoff{
+		t:    float64(int64(initialTimeout / time.Millisecond)),
+		f:    2.0,
+		m:    float64(int64(maxTimeout / time.Millisecond)),
+		n:    0,
+		stop: false,
+	}
+}
+
+// SendStop, when enabled, makes Next return Stop once
+// the maximum timeout is reached.
+func (b *ExponentialBackoff) SendStop(doStop bool) *ExponentialBackoff {
+	b.Lock()
+	defer b.Unlock()
+	b.stop = doStop
+	return b
+}
+
+// Next returns the next wait interval.
+func (t *ExponentialBackoff) Next() time.Duration {
+	t.Lock()
+	defer t.Unlock()
+
+	n := float64(atomic.AddInt64(&t.n, 1))
+	r := 1.0 + rand.Float64() // random number in [1..2)
+	m := math.Min(r*t.t*math.Pow(t.f, n), t.m)
+	if t.stop && m >= t.m {
+		return Stop
+	}
+	d := time.Duration(int64(m)) * time.Millisecond
+	return d
+}
+
+// Reset resets the backoff policy so that it can be reused.
+func (t *ExponentialBackoff) Reset() {
+	t.Lock()
+	t.n = 0
+	t.Unlock()
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff_test.go
new file mode 100644
index 000000000..9b5bcf0e1
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+ +package backoff + +import ( + "math/rand" + "testing" + "time" +) + +func TestSimpleBackoff(t *testing.T) { + b := NewSimpleBackoff(1, 2, 7) + + if got, want := b.Next(), time.Duration(1)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(2)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + b.Reset() + + if got, want := b.Next(), time.Duration(1)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(2)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestSimpleBackoffWithStop(t *testing.T) { + b := NewSimpleBackoff(1, 2, 7).SendStop(true) + + // It should eventually return Stop (-1) after some loops. + var last time.Duration + for i := 0; i < 10; i++ { + last = b.Next() + if last == Stop { + break + } + } + if got, want := last, Stop; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + b.Reset() + + // It should eventually return Stop (-1) after some loops. + for i := 0; i < 10; i++ { + last = b.Next() + if last == Stop { + break + } + } + if got, want := last, Stop; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestExponentialBackoff(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + + min := time.Duration(8) * time.Millisecond + max := time.Duration(256) * time.Millisecond + b := NewExponentialBackoff(min, max) + + between := func(value time.Duration, a, b int) bool { + x := int(value / time.Millisecond) + return a <= x && x <= b + } + + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + + b.Reset() + + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } +} + +func TestExponentialBackoffWithStop(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + + min := time.Duration(8) * time.Millisecond + max := time.Duration(256) * time.Millisecond + b := NewExponentialBackoff(min, max).SendStop(true) + + // It should eventually return Stop (-1) after some loops. 
+
+	var last time.Duration
+	for i := 0; i < 10; i++ {
+		last = b.Next()
+		if last == Stop {
+			break
+		}
+	}
+	if got, want := last, Stop; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	b.Reset()
+
+	// It should eventually return Stop (-1) after some loops.
+	for i := 0; i < 10; i++ {
+		last = b.Next()
+		if last == Stop {
+			break
+		}
+	}
+	if got, want := last, Stop; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry.go b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry.go
new file mode 100644
index 000000000..701e03ccc
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry.go
@@ -0,0 +1,53 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is (c) 2014 Cenk Altı and governed by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package backoff
+
+import "time"
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy states to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry runs the operation o until it does not return an error or b stops.
+// o is guaranteed to be run at least once.
+// It is the caller's responsibility to reset b after Retry returns.
+//
+// Retry sleeps the goroutine for the duration returned by the Backoff after a
+// failed operation returns.
+func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }
+
+// RetryNotify calls the notify function with the error and wait duration
+// for each failed attempt before sleeping.
+func RetryNotify(operation Operation, b Backoff, notify Notify) error {
+	var err error
+	var next time.Duration
+
+	b.Reset()
+	for {
+		if err = operation(); err == nil {
+			return nil
+		}
+
+		if next = b.Next(); next == Stop {
+			return err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		time.Sleep(next)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry_test.go
new file mode 100644
index 000000000..0dd45404b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is (c) 2014 Cenk Altı and governed by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package backoff
+
+import (
+	"errors"
+	"log"
+	"testing"
+	"time"
+)
+
+func TestRetry(t *testing.T) {
+	const successOn = 3
+	var i = 0
+
+	// This function succeeds on the "successOn"th call.
+	f := func() error {
+		i++
+		log.Printf("function is called %d. 
time\n", i)
+
+		if i == successOn {
+			log.Println("OK")
+			return nil
+		}
+
+		log.Println("error")
+		return errors.New("error")
+	}
+
+	min := time.Duration(8) * time.Millisecond
+	max := time.Duration(256) * time.Millisecond
+	err := Retry(f, NewExponentialBackoff(min, max).SendStop(true))
+	if err != nil {
+		t.Errorf("unexpected error: %s", err.Error())
+	}
+	if i != successOn {
+		t.Errorf("invalid number of retries: %d", i)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk.go
new file mode 100644
index 000000000..91c7a9c17
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk.go
@@ -0,0 +1,314 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+type BulkService struct {
+	client *Client
+
+	index    string
+	_type    string
+	requests []BulkableRequest
+	//replicationType string
+	//consistencyLevel string
+	timeout string
+	refresh *bool
+	pretty  bool
+
+	sizeInBytes int64
+}
+
+func NewBulkService(client *Client) *BulkService {
+	builder := &BulkService{
+		client:   client,
+		requests: make([]BulkableRequest, 0),
+	}
+	return builder
+}
+
+func (s *BulkService) reset() {
+	s.requests = make([]BulkableRequest, 0)
+	s.sizeInBytes = 0
+}
+
+func (s *BulkService) Index(index string) *BulkService {
+	s.index = index
+	return s
+}
+
+func (s *BulkService) Type(_type string) *BulkService {
+	s._type = _type
+	return s
+}
+
+func (s *BulkService) Timeout(timeout string) *BulkService {
+	s.timeout = timeout
+	return s
+}
+
+func (s *BulkService) Refresh(refresh bool) *BulkService {
+	s.refresh = &refresh
+	return s
+}
+
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *BulkService) Add(r BulkableRequest) *BulkService {
+	s.requests = append(s.requests, r)
+	s.sizeInBytes += s.estimateSizeInBytes(r)
+	return s
+}
+
+func (s *BulkService) EstimatedSizeInBytes() int64 {
+	return s.sizeInBytes
+}
+
+func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
+	// +1 for the \n
+	return int64(1 + len([]byte(r.String())))
+}
+
+func (s *BulkService) NumberOfActions() int {
+	return len(s.requests)
+}
+
+func (s *BulkService) bodyAsString() (string, error) {
+	buf := bytes.NewBufferString("")
+
+	for _, req := range s.requests {
+		source, err := req.Source()
+		if err != nil {
+			return "", err
+		}
+		for _, line := range source {
+			_, err := buf.WriteString(fmt.Sprintf("%s\n", line))
+			if err != nil {
+				// Propagate the write error instead of silently returning nil.
+				return "", err
+			}
+		}
+	}
+
+	return buf.String(), nil
+}
+
+func (s *BulkService) Do() (*BulkResponse, error) {
+	// No actions?
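+	// Elasticsearch rejects an empty bulk request, so we fail fast here.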
+ if s.NumberOfActions() == 0 { + return nil, errors.New("elastic: No bulk actions to commit") + } + + // Get body + body, err := s.bodyAsString() + if err != nil { + return nil, err + } + + // Build url + path := "/" + if s.index != "" { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": s.index, + }) + if err != nil { + return nil, err + } + path += index + "/" + } + if s._type != "" { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": s._type, + }) + if err != nil { + return nil, err + } + path += typ + "/" + } + path += "_bulk" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(BulkResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + + // Reset so the request can be reused + s.reset() + + return ret, nil +} + +// BulkResponse is a response to a bulk execution. +// +// Example: +// { +// "took":3, +// "errors":false, +// "items":[{ +// "index":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":3, +// "status":201 +// } +// },{ +// "index":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":3, +// "status":200 +// } +// },{ +// "delete":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":4, +// "status":200, +// "found":true +// } +// },{ +// "update":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":4, +// "status":200 +// } +// }] +// } +type BulkResponse struct { + Took int `json:"took,omitempty"` + Errors bool `json:"errors,omitempty"` + Items []map[string]*BulkResponseItem `json:"items,omitempty"` +} + +// BulkResponseItem is the result of a single bulk request. +type BulkResponseItem struct { + Index string `json:"_index,omitempty"` + Type string `json:"_type,omitempty"` + Id string `json:"_id,omitempty"` + Version int `json:"_version,omitempty"` + Status int `json:"status,omitempty"` + Found bool `json:"found,omitempty"` + Error *ErrorDetails `json:"error,omitempty"` +} + +// Indexed returns all bulk request results of "index" actions. +func (r *BulkResponse) Indexed() []*BulkResponseItem { + return r.ByAction("index") +} + +// Created returns all bulk request results of "create" actions. +func (r *BulkResponse) Created() []*BulkResponseItem { + return r.ByAction("create") +} + +// Updated returns all bulk request results of "update" actions. +func (r *BulkResponse) Updated() []*BulkResponseItem { + return r.ByAction("update") +} + +// Deleted returns all bulk request results of "delete" actions. +func (r *BulkResponse) Deleted() []*BulkResponseItem { + return r.ByAction("delete") +} + +// ByAction returns all bulk request results of a certain action, +// e.g. "index" or "delete". +func (r *BulkResponse) ByAction(action string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + items := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + if result, found := item[action]; found { + items = append(items, result) + } + } + return items +} + +// ById returns all bulk request results of a given document id, +// regardless of the action ("index", "delete" etc.). 
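+// The id is matched against the _id of each item in the response.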
+func (r *BulkResponse) ById(id string) []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	items := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if result.Id == id {
+				items = append(items, result)
+			}
+		}
+	}
+	return items
+}
+
+// Failed returns those items of a bulk response that have errors,
+// i.e. those that don't have a status code between 200 and 299.
+func (r *BulkResponse) Failed() []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	errors := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if !(result.Status >= 200 && result.Status <= 299) {
+				errors = append(errors, result)
+			}
+		}
+	}
+	return errors
+}
+
+// Succeeded returns those items of a bulk response that have no errors,
+// i.e. those that have a status code between 200 and 299.
+func (r *BulkResponse) Succeeded() []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	succeeded := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if result.Status >= 200 && result.Status <= 299 {
+				succeeded = append(succeeded, result)
+			}
+		}
+	}
+	return succeeded
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request.go
new file mode 100644
index 000000000..0ea372209
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request.go
@@ -0,0 +1,112 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// -- Bulk delete request --
+
+// BulkDeleteRequest is a bulk request to remove a document from Elasticsearch.
+type BulkDeleteRequest struct {
+	BulkableRequest
+	index       string
+	typ         string
+	id          string
+	routing     string
+	refresh     *bool
+	version     int64  // default is MATCH_ANY
+	versionType string // default is "internal"
+}
+
+func NewBulkDeleteRequest() *BulkDeleteRequest {
+	return &BulkDeleteRequest{}
+}
+
+func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
+	r.index = index
+	return r
+}
+
+func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
+	r.typ = typ
+	return r
+}
+
+func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
+	r.id = id
+	return r
+}
+
+func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
+	r.routing = routing
+	return r
+}
+
+func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
+	r.refresh = &refresh
+	return r
+}
+
+func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
+	r.version = version
+	return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
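+// The zero value leaves the version type unset, so Elasticsearch applies
+// its default ("internal").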
+func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
+	r.versionType = versionType
+	return r
+}
+
+func (r *BulkDeleteRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkDeleteRequest) Source() ([]string, error) {
+	lines := make([]string, 1)
+
+	source := make(map[string]interface{})
+	deleteCommand := make(map[string]interface{})
+	if r.index != "" {
+		deleteCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		deleteCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		deleteCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		deleteCommand["_routing"] = r.routing
+	}
+	if r.version > 0 {
+		deleteCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		deleteCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		deleteCommand["refresh"] = *r.refresh
+	}
+	source["delete"] = deleteCommand
+
+	body, err := json.Marshal(source)
+	if err != nil {
+		return nil, err
+	}
+
+	lines[0] = string(body)
+
+	return lines, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request_test.go
new file mode 100644
index 000000000..73abfcd40
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestBulkDeleteRequestSerialization(t *testing.T) {
+	tests := []struct {
+		Request  BulkableRequest
+		Expected []string
+	}{
+		// #0
+		{
+			Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"),
+			Expected: []string{
+				`{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+			},
+		},
+	}
+
+	for i, test := range tests {
+		lines, err := test.Request.Source()
+		if err != nil {
+			t.Fatalf("case #%d: expected no error, got: %v", i, err)
+		}
+		if lines == nil {
+			t.Fatalf("case #%d: expected lines, got nil", i)
+		}
+		if len(lines) != len(test.Expected) {
+			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+		}
+		for j, line := range lines {
+			if line != test.Expected[j] {
+				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+			}
+		}
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request.go
new file mode 100644
index 000000000..495694671
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request.go
@@ -0,0 +1,173 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// BulkIndexRequest is a bulk request to add a document to Elasticsearch.
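+// Build it with NewBulkIndexRequest and the fluent setters below, then add
+// it to a BulkService or BulkProcessor.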
+type BulkIndexRequest struct { + BulkableRequest + index string + typ string + id string + opType string + routing string + parent string + timestamp string + ttl int64 + refresh *bool + version int64 // default is MATCH_ANY + versionType string // default is "internal" + doc interface{} +} + +func NewBulkIndexRequest() *BulkIndexRequest { + return &BulkIndexRequest{ + opType: "index", + } +} + +func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest { + r.index = index + return r +} + +func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest { + r.typ = typ + return r +} + +func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest { + r.id = id + return r +} + +func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest { + r.opType = opType + return r +} + +func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest { + r.routing = routing + return r +} + +func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest { + r.parent = parent + return r +} + +func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest { + r.timestamp = timestamp + return r +} + +func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest { + r.ttl = ttl + return r +} + +func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest { + r.refresh = &refresh + return r +} + +func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest { + r.version = version + return r +} + +func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest { + r.versionType = versionType + return r +} + +func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest { + r.doc = doc + return r +} + +func (r *BulkIndexRequest) String() string { + lines, err := r.Source() + if err == nil { + return strings.Join(lines, "\n") + } + return fmt.Sprintf("error: %v", err) +} + +func (r *BulkIndexRequest) Source() ([]string, error) { + // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } + // { "field1" : "value1" } + + lines := make([]string, 2) + + // "index" ... + command := make(map[string]interface{}) + indexCommand := make(map[string]interface{}) + if r.index != "" { + indexCommand["_index"] = r.index + } + if r.typ != "" { + indexCommand["_type"] = r.typ + } + if r.id != "" { + indexCommand["_id"] = r.id + } + if r.routing != "" { + indexCommand["_routing"] = r.routing + } + if r.parent != "" { + indexCommand["_parent"] = r.parent + } + if r.timestamp != "" { + indexCommand["_timestamp"] = r.timestamp + } + if r.ttl > 0 { + indexCommand["_ttl"] = r.ttl + } + if r.version > 0 { + indexCommand["_version"] = r.version + } + if r.versionType != "" { + indexCommand["_version_type"] = r.versionType + } + if r.refresh != nil { + indexCommand["refresh"] = *r.refresh + } + command[r.opType] = indexCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // "field1" ... 
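+	// The document itself goes on the second line of the bulk request:
+	// strings and raw JSON messages are passed through verbatim, anything
+	// else is marshaled to JSON, and a nil doc becomes an empty object.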
+	if r.doc != nil {
+		switch t := r.doc.(type) {
+		default:
+			body, err := json.Marshal(r.doc)
+			if err != nil {
+				return nil, err
+			}
+			lines[1] = string(body)
+		case json.RawMessage:
+			lines[1] = string(t)
+		case *json.RawMessage:
+			lines[1] = string(*t)
+		case string:
+			lines[1] = t
+		case *string:
+			lines[1] = *t
+		}
+	} else {
+		lines[1] = "{}"
+	}
+
+	return lines, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request_test.go
new file mode 100644
index 000000000..271347e30
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request_test.go
@@ -0,0 +1,63 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+	"time"
+)
+
+func TestBulkIndexRequestSerialization(t *testing.T) {
+	tests := []struct {
+		Request  BulkableRequest
+		Expected []string
+	}{
+		// #0
+		{
+			Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+		// #1
+		{
+			Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1").
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+		// #2
+		{
+			Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+	}
+
+	for i, test := range tests {
+		lines, err := test.Request.Source()
+		if err != nil {
+			t.Fatalf("case #%d: expected no error, got: %v", i, err)
+		}
+		if lines == nil {
+			t.Fatalf("case #%d: expected lines, got nil", i)
+		}
+		if len(lines) != len(test.Expected) {
+			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+		}
+		for j, line := range lines {
+			if line != test.Expected[j] {
+				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+			}
+		}
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor.go
new file mode 100644
index 000000000..04492a47c
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor.go
@@ -0,0 +1,515 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/olivere/elastic.v3/backoff"
+)
+
+// BulkProcessorService makes it easy to process bulk requests. It lets you set
+// policies for when to flush new bulk requests, e.g. 
based on the number of actions,
+// on the size of the actions, and/or periodically. It also lets you
+// control the number of concurrent bulk requests allowed to be executed
+// in parallel.
+//
+// BulkProcessorService, by default, commits either every 1000 requests or when the
+// (estimated) size of the bulk requests exceeds 5 MB. However, it does not
+// commit periodically. BulkProcessorService also retries on errors by default,
+// using an exponential backoff algorithm.
+//
+// The caller is responsible for setting the index and type on every
+// bulk request added to BulkProcessorService.
+//
+// BulkProcessorService takes ideas from the BulkProcessor of the
+// Elasticsearch Java API as documented in
+// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
+type BulkProcessorService struct {
+	c              *Client
+	beforeFn       BulkBeforeFunc
+	afterFn        BulkAfterFunc
+	name           string        // name of processor
+	numWorkers     int           // # of workers (>= 1)
+	bulkActions    int           // # of requests after which to commit
+	bulkSize       int           // # of bytes after which to commit
+	flushInterval  time.Duration // periodic flush interval
+	wantStats      bool          // indicates whether to gather statistics
+	initialTimeout time.Duration // initial wait time before retry on errors
+	maxTimeout     time.Duration // max time to wait for retry on errors
+}
+
+// NewBulkProcessorService creates a new BulkProcessorService.
+func NewBulkProcessorService(client *Client) *BulkProcessorService {
+	return &BulkProcessorService{
+		c:              client,
+		numWorkers:     1,
+		bulkActions:    1000,
+		bulkSize:       5 << 20, // 5 MB
+		initialTimeout: time.Duration(200) * time.Millisecond,
+		maxTimeout:     time.Duration(10000) * time.Millisecond,
+	}
+}
+
+// BulkBeforeFunc defines the signature of callbacks that are executed
+// before a commit to Elasticsearch.
+type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
+
+// BulkAfterFunc defines the signature of callbacks that are executed
+// after a commit to Elasticsearch. The err parameter signals an error.
+type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
+
+// Before specifies a function to be executed before bulk requests get committed
+// to Elasticsearch.
+func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
+	s.beforeFn = fn
+	return s
+}
+
+// After specifies a function to be executed when bulk requests have been
+// committed to Elasticsearch. The After callback executes both when the
+// commit was successful as well as on failures.
+func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
+	s.afterFn = fn
+	return s
+}
+
+// Name is an optional name to identify this bulk processor.
+func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
+	s.name = name
+	return s
+}
+
+// Workers sets the number of concurrent workers.
+// Defaults to 1 and must be greater than or equal to 1.
+func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
+	s.numWorkers = num
+	return s
+}
+
+// BulkActions specifies when to flush based on the number of actions
+// currently added. Defaults to 1000 and can be set to -1 to be disabled.
+func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
+	s.bulkActions = bulkActions
+	return s
+}
+
+// BulkSize specifies when to flush based on the size (in bytes) of the actions
+// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
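+// The size of a request is estimated from its serialized (String) form.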
+func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService { + s.bulkSize = bulkSize + return s +} + +// FlushInterval specifies when to flush at the end of the given interval. +// This is disabled by default. If you want the bulk processor to +// operate completely asynchronously, set both BulkActions and BulkSize to +// -1 and set the FlushInterval to a meaningful interval. +func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService { + s.flushInterval = interval + return s +} + +// Stats tells bulk processor to gather stats while running. +// Use Stats to return the stats. This is disabled by default. +func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService { + s.wantStats = wantStats + return s +} + +// Do creates a new BulkProcessor and starts it. +// Consider the BulkProcessor as a running instance that accepts bulk requests +// and commits them to Elasticsearch, spreading the work across one or more +// workers. +// +// You can interoperate with the BulkProcessor returned by Do, e.g. Start and +// Stop (or Close) it. +// +// Calling Do several times returns new BulkProcessors. You probably don't +// want to do this. BulkProcessorService implements just a builder pattern. +func (s *BulkProcessorService) Do() (*BulkProcessor, error) { + p := newBulkProcessor( + s.c, + s.beforeFn, + s.afterFn, + s.name, + s.numWorkers, + s.bulkActions, + s.bulkSize, + s.flushInterval, + s.wantStats, + s.initialTimeout, + s.maxTimeout) + + err := p.Start() + if err != nil { + return nil, err + } + return p, nil +} + +// -- Bulk Processor Statistics -- + +// BulkProcessorStats contains various statistics of a bulk processor +// while it is running. Use the Stats func to return it while running. +type BulkProcessorStats struct { + Flushed int64 // number of times the flush interval has been invoked + Committed int64 // # of times workers committed bulk requests + Indexed int64 // # of requests indexed + Created int64 // # of requests that ES reported as creates (201) + Updated int64 // # of requests that ES reported as updates + Deleted int64 // # of requests that ES reported as deletes + Succeeded int64 // # of requests that ES reported as successful + Failed int64 // # of requests that ES reported as failed + + Workers []*BulkProcessorWorkerStats // stats for each worker +} + +// BulkProcessorWorkerStats represents per-worker statistics. +type BulkProcessorWorkerStats struct { + Queued int64 // # of requests queued in this worker + LastDuration time.Duration // duration of last commit +} + +// newBulkProcessorStats initializes and returns a BulkProcessorStats struct. +func newBulkProcessorStats(workers int) *BulkProcessorStats { + stats := &BulkProcessorStats{ + Workers: make([]*BulkProcessorWorkerStats, workers), + } + for i := 0; i < workers; i++ { + stats.Workers[i] = &BulkProcessorWorkerStats{} + } + return stats +} + +// -- Bulk Processor -- + +// BulkProcessor encapsulates a task that accepts bulk requests and +// orchestrates committing them to Elasticsearch via one or more workers. +// +// BulkProcessor is returned by setting up a BulkProcessorService and +// calling the Do method. 
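+//
+// Calling Stop (or Close) flushes any outstanding requests and stops all
+// workers.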
+type BulkProcessor struct { + c *Client + beforeFn BulkBeforeFunc + afterFn BulkAfterFunc + name string + bulkActions int + bulkSize int + numWorkers int + executionId int64 + requestsC chan BulkableRequest + workerWg sync.WaitGroup + workers []*bulkWorker + flushInterval time.Duration + flusherStopC chan struct{} + wantStats bool + initialTimeout time.Duration // initial wait time before retry on errors + maxTimeout time.Duration // max time to wait for retry on errors + + startedMu sync.Mutex // guards the following block + started bool + + statsMu sync.Mutex // guards the following block + stats *BulkProcessorStats +} + +func newBulkProcessor( + client *Client, + beforeFn BulkBeforeFunc, + afterFn BulkAfterFunc, + name string, + numWorkers int, + bulkActions int, + bulkSize int, + flushInterval time.Duration, + wantStats bool, + initialTimeout time.Duration, + maxTimeout time.Duration) *BulkProcessor { + return &BulkProcessor{ + c: client, + beforeFn: beforeFn, + afterFn: afterFn, + name: name, + numWorkers: numWorkers, + bulkActions: bulkActions, + bulkSize: bulkSize, + flushInterval: flushInterval, + wantStats: wantStats, + initialTimeout: initialTimeout, + maxTimeout: maxTimeout, + } +} + +// Start starts the bulk processor. If the processor is already started, +// nil is returned. +func (p *BulkProcessor) Start() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + if p.started { + return nil + } + + // We must have at least one worker. + if p.numWorkers < 1 { + p.numWorkers = 1 + } + + p.requestsC = make(chan BulkableRequest) + p.executionId = 0 + p.stats = newBulkProcessorStats(p.numWorkers) + + // Create and start up workers. + p.workers = make([]*bulkWorker, p.numWorkers) + for i := 0; i < p.numWorkers; i++ { + p.workerWg.Add(1) + p.workers[i] = newBulkWorker(p, i) + go p.workers[i].work() + } + + // Start the ticker for flush (if enabled) + if int64(p.flushInterval) > 0 { + p.flusherStopC = make(chan struct{}) + go p.flusher(p.flushInterval) + } + + p.started = true + + return nil +} + +// Stop is an alias for Close. +func (p *BulkProcessor) Stop() error { + return p.Close() +} + +// Close stops the bulk processor previously started with Do. +// If it is already stopped, this is a no-op and nil is returned. +// +// By implementing Close, BulkProcessor implements the io.Closer interface. +func (p *BulkProcessor) Close() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + // Already stopped? Do nothing. + if !p.started { + return nil + } + + // Stop flusher (if enabled) + if p.flusherStopC != nil { + p.flusherStopC <- struct{}{} + <-p.flusherStopC + close(p.flusherStopC) + p.flusherStopC = nil + } + + // Stop all workers. + close(p.requestsC) + p.workerWg.Wait() + + p.started = false + + return nil +} + +// Stats returns the latest bulk processor statistics. +// Collecting stats must be enabled first by calling Stats(true) on +// the service that created this processor. +func (p *BulkProcessor) Stats() BulkProcessorStats { + p.statsMu.Lock() + defer p.statsMu.Unlock() + return *p.stats +} + +// Add adds a single request to commit by the BulkProcessorService. +// +// The caller is responsible for setting the index and type on the request. +func (p *BulkProcessor) Add(request BulkableRequest) { + p.requestsC <- request +} + +// Flush manually asks all workers to commit their outstanding requests. +// It returns only when all workers acknowledge completion. 
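+// Flush does not stop the processor; requests may still be added
+// concurrently while a flush is in progress.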
+func (p *BulkProcessor) Flush() error { + p.statsMu.Lock() + p.stats.Flushed++ + p.statsMu.Unlock() + + for _, w := range p.workers { + w.flushC <- struct{}{} + <-w.flushAckC // wait for completion + } + return nil +} + +// flusher is a single goroutine that periodically asks all workers to +// commit their outstanding bulk requests. It is only started if +// FlushInterval is greater than 0. +func (p *BulkProcessor) flusher(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: // Periodic flush + p.Flush() // TODO swallow errors here? + + case <-p.flusherStopC: + p.flusherStopC <- struct{}{} + return + } + } +} + +// -- Bulk Worker -- + +// bulkWorker encapsulates a single worker, running in a goroutine, +// receiving bulk requests and eventually committing them to Elasticsearch. +// It is strongly bound to a BulkProcessor. +type bulkWorker struct { + p *BulkProcessor + i int + bulkActions int + bulkSize int + service *BulkService + flushC chan struct{} + flushAckC chan struct{} +} + +// newBulkWorker creates a new bulkWorker instance. +func newBulkWorker(p *BulkProcessor, i int) *bulkWorker { + return &bulkWorker{ + p: p, + i: i, + bulkActions: p.bulkActions, + bulkSize: p.bulkSize, + service: NewBulkService(p.c), + flushC: make(chan struct{}), + flushAckC: make(chan struct{}), + } +} + +// work waits for bulk requests and manual flush calls on the respective +// channels and is invoked as a goroutine when the bulk processor is started. +func (w *bulkWorker) work() { + defer func() { + w.p.workerWg.Done() + close(w.flushAckC) + close(w.flushC) + }() + + var stop bool + for !stop { + select { + case req, open := <-w.p.requestsC: + if open { + // Received a new request + w.service.Add(req) + if w.commitRequired() { + w.commit() // TODO swallow errors here? + } + } else { + // Channel closed: Stop. + stop = true + if w.service.NumberOfActions() > 0 { + w.commit() // TODO swallow errors here? + } + } + + case <-w.flushC: + // Commit outstanding requests + if w.service.NumberOfActions() > 0 { + w.commit() // TODO swallow errors here? + } + w.flushAckC <- struct{}{} + } + } +} + +// commit commits the bulk requests in the given service, +// invoking callbacks as specified. 
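+// Failed commits are retried with an exponential backoff policy before the
+// error is handed to the after-callback.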
+func (w *bulkWorker) commit() error { + var res *BulkResponse + + // commitFunc will commit bulk requests and, on failure, be retried + // via exponential backoff + commitFunc := func() error { + var err error + res, err = w.service.Do() + return err + } + // notifyFunc will be called if retry fails + notifyFunc := func(err error, d time.Duration) { + w.p.c.errorf("elastic: bulk processor %q failed but will retry in %v: %v", w.p.name, d, err) + } + + id := atomic.AddInt64(&w.p.executionId, 1) + + // Update # documents in queue before eventual retries + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + } + w.p.statsMu.Unlock() + + // Invoke before callback + if w.p.beforeFn != nil { + w.p.beforeFn(id, w.service.requests) + } + + // Commit bulk requests + policy := backoff.NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout).SendStop(true) + err := backoff.RetryNotify(commitFunc, policy, notifyFunc) + w.updateStats(res) + if err != nil { + w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err) + } + + // Invoke after callback + if w.p.afterFn != nil { + w.p.afterFn(id, w.service.requests, res, err) + } + + return err +} + +func (w *bulkWorker) updateStats(res *BulkResponse) { + // Update stats + if res != nil { + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Committed++ + if res != nil { + w.p.stats.Indexed += int64(len(res.Indexed())) + w.p.stats.Created += int64(len(res.Created())) + w.p.stats.Updated += int64(len(res.Updated())) + w.p.stats.Deleted += int64(len(res.Deleted())) + w.p.stats.Succeeded += int64(len(res.Succeeded())) + w.p.stats.Failed += int64(len(res.Failed())) + } + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond + } + w.p.statsMu.Unlock() + } +} + +// commitRequired returns true if the service has to commit its +// bulk requests. This can be either because the number of actions +// or the estimated size in bytes is larger than specified in the +// BulkProcessorService. +func (w *bulkWorker) commitRequired() bool { + if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions { + return true + } + if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) { + return true + } + return false +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor_test.go new file mode 100644 index 000000000..645617b4d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor_test.go @@ -0,0 +1,406 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "fmt" + "math/rand" + "sync/atomic" + "testing" + "time" +) + +func TestBulkProcessorDefaults(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + p := client.BulkProcessor() + if p == nil { + t.Fatalf("expected BulkProcessorService; got: %v", p) + } + if got, want := p.name, ""; got != want { + t.Errorf("expected %q; got: %q", want, got) + } + if got, want := p.numWorkers, 1; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.bulkActions, 1000; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.bulkSize, 5*1024*1024; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.flushInterval, time.Duration(0); got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := p.wantStats, false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestBulkProcessorCommitOnBulkActions(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Actions-1"). + Workers(1). + BulkActions(100). + BulkSize(-1), + ) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Actions-2"). + Workers(2). + BulkActions(100). + BulkSize(-1), + ) +} + +func TestBulkProcessorCommitOnBulkSize(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Size-1"). + Workers(1). + BulkActions(-1). + BulkSize(64*1024), + ) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Size-2"). + Workers(2). + BulkActions(-1). + BulkSize(64*1024), + ) +} + +func TestBulkProcessorBasedOnFlushInterval(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + } + + svc := client.BulkProcessor(). + Name("FlushInterval-1"). + Workers(2). + BulkActions(-1). + BulkSize(-1). + FlushInterval(1 * time.Second). + Before(beforeFn). + After(afterFn) + + p, err := svc.Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 1000 // low-enough number that flush should be invoked + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. 
%s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should flush at least once + time.Sleep(2 * time.Second) + + err = p.Close() + if err != nil { + t.Fatal(err) + } + + if p.stats.Flushed == 0 { + t.Errorf("expected at least 1 flush; got: %d", p.stats.Flushed) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +func TestBulkProcessorClose(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + } + + p, err := client.BulkProcessor(). + Name("FlushInterval-1"). + Workers(2). + BulkActions(-1). + BulkSize(-1). + FlushInterval(30 * time.Second). // 30 seconds to flush + Before(beforeFn).After(afterFn). + Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 1000 // low-enough number that flush should be invoked + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should not flush because 30s > 1s + time.Sleep(1 * time.Second) + + // Close should flush + err = p.Close() + if err != nil { + t.Fatal(err) + } + + if p.stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", p.stats.Flushed) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +func TestBulkProcessorFlush(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + p, err := client.BulkProcessor(). + Name("ManualFlush"). + Workers(10). + BulkActions(-1). 
+ BulkSize(-1). + FlushInterval(30 * time.Second). // 30 seconds to flush + Stats(true). + Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 100 + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should not flush because 30s > 1s + time.Sleep(1 * time.Second) + + // No flush yet + stats := p.Stats() + if stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", p.stats.Flushed) + } + + // Manual flush + err = p.Flush() + if err != nil { + t.Fatal(err) + } + + time.Sleep(1 * time.Second) + + // Now flushed + stats = p.Stats() + if got, want := p.stats.Flushed, int64(1); got != want { + t.Errorf("expected %d flush; got: %d", want, got) + } + + // Close should not start another flush + err = p.Close() + if err != nil { + t.Fatal(err) + } + + // Still 1 flush + stats = p.Stats() + if got, want := p.stats.Flushed, int64(1); got != want { + t.Errorf("expected %d flush; got: %d", want, got) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +// -- Helper -- + +func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) { + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + } + + p, err := svc.Before(beforeFn).After(afterFn).Stats(true).Do() + if err != nil { + t.Fatal(err) + } + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. 
%s", i, randomString(1+rand.Intn(63)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + err = p.Close() + if err != nil { + t.Fatal(err) + } + + stats := p.Stats() + + if stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", stats.Flushed) + } + if stats.Committed <= 0 { + t.Errorf("expected committed > %d; got: %d", 0, stats.Committed) + } + if got, want := stats.Indexed, int64(numDocs); got != want { + t.Errorf("expected indexed = %d; got: %d", want, got) + } + if got, want := stats.Created, int64(0); got != want { + t.Errorf("expected created = %d; got: %d", want, got) + } + if got, want := stats.Updated, int64(0); got != want { + t.Errorf("expected updated = %d; got: %d", want, got) + } + if got, want := stats.Deleted, int64(0); got != want { + t.Errorf("expected deleted = %d; got: %d", want, got) + } + if got, want := stats.Succeeded, int64(numDocs); got != want { + t.Errorf("expected succeeded = %d; got: %d", want, got) + } + if got, want := stats.Failed, int64(0); got != want { + t.Errorf("expected failed = %d; got: %d", want, got) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_request.go new file mode 100644 index 000000000..315b535ca --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_request.go @@ -0,0 +1,17 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// -- Bulkable request (index/update/delete) -- + +// Generic interface to bulkable requests. +type BulkableRequest interface { + fmt.Stringer + Source() ([]string, error) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_test.go new file mode 100644 index 000000000..7ce9053c8 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_test.go @@ -0,0 +1,463 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBulk(t *testing.T) { + //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. 
Yeah."} + + index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) + index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) + delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + + if bulkRequest.NumberOfActions() != 3 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions()) + } + + bulkResponse, err := bulkRequest.Do() + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + + if bulkRequest.NumberOfActions() != 0 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) + } + + // Document with Id="1" should not exist + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if exists { + t.Errorf("expected exists %v; got %v", false, exists) + } + + // Document with Id="2" should exist + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do() + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("expected exists %v; got %v", true, exists) + } + + // Update + updateDoc := struct { + Retweets int `json:"retweets"` + }{ + 42, + } + update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc) + bulkRequest = client.Bulk() + bulkRequest = bulkRequest.Add(update1Req) + + if bulkRequest.NumberOfActions() != 1 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions()) + } + + bulkResponse, err = bulkRequest.Do() + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + + if bulkRequest.NumberOfActions() != 0 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) + } + + // Document with Id="1" should have a retweets count of 42 + doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do() + if err != nil { + t.Fatal(err) + } + if doc == nil { + t.Fatal("expected doc to be != nil; got nil") + } + if !doc.Found { + t.Fatalf("expected doc to be found; got found = %v", doc.Found) + } + if doc.Source == nil { + t.Fatal("expected doc source to be != nil; got nil") + } + var updatedTweet tweet + err = json.Unmarshal(*doc.Source, &updatedTweet) + if err != nil { + t.Fatal(err) + } + if updatedTweet.Retweets != 42 { + t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets) + } + + // Update with script + update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). + RetryOnConflict(3). 
+		Script(NewScript("ctx._source.retweets += v").Param("v", 1))
+	bulkRequest = client.Bulk()
+	bulkRequest = bulkRequest.Add(update2Req)
+	if bulkRequest.NumberOfActions() != 1 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
+	}
+	bulkResponse, err = bulkRequest.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bulkResponse == nil {
+		t.Errorf("expected bulkResponse to be != nil; got nil")
+	}
+
+	if bulkRequest.NumberOfActions() != 0 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+	}
+
+	// Document with Id="2" should have a retweets count of 43
+	doc, err = client.Get().Index(testIndexName).Type("tweet").Id("2").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if doc == nil {
+		t.Fatal("expected doc to be != nil; got nil")
+	}
+	if !doc.Found {
+		t.Fatalf("expected doc to be found; got found = %v", doc.Found)
+	}
+	if doc.Source == nil {
+		t.Fatal("expected doc source to be != nil; got nil")
+	}
+	err = json.Unmarshal(*doc.Source, &updatedTweet)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if updatedTweet.Retweets != 43 {
+		t.Errorf("expected updated tweet retweets = %v; got %v", 43, updatedTweet.Retweets)
+	}
+}
+
+func TestBulkWithIndexSetOnClient(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+	index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+
+	bulkRequest := client.Bulk().Index(testIndexName).Type("tweet")
+	bulkRequest = bulkRequest.Add(index1Req)
+	bulkRequest = bulkRequest.Add(index2Req)
+	bulkRequest = bulkRequest.Add(delete1Req)
+
+	if bulkRequest.NumberOfActions() != 3 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+	}
+
+	bulkResponse, err := bulkRequest.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bulkResponse == nil {
+		t.Errorf("expected bulkResponse to be != nil; got nil")
+	}
+
+	// Document with Id="1" should not exist
+	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("expected exists %v; got %v", false, exists)
+	}
+
+	// Document with Id="2" should exist
+	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("expected exists %v; got %v", true, exists)
+	}
+}
+
+func TestBulkRequestsSerialization(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+	index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+	update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
+ Doc(struct { + Retweets int `json:"retweets"` + }{ + Retweets: 42, + }) + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + bulkRequest = bulkRequest.Add(update2Req) + + if bulkRequest.NumberOfActions() != 4 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions()) + } + + expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}} +{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"} +{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}} +{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"} +{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}} +{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}} +{"doc":{"retweets":42}} +` + got, err := bulkRequest.bodyAsString() + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } + + // Run the bulk request + bulkResponse, err := bulkRequest.Do() + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + if bulkResponse.Took == 0 { + t.Errorf("expected took to be > 0; got %d", bulkResponse.Took) + } + if bulkResponse.Errors { + t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors) + } + if len(bulkResponse.Items) != 4 { + t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items)) + } + + // Indexed actions + indexed := bulkResponse.Indexed() + if indexed == nil { + t.Fatal("expected indexed to be != nil; got nil") + } + if len(indexed) != 1 { + t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed)) + } + if indexed[0].Id != "1" { + t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id) + } + if indexed[0].Status != 201 { + t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status) + } + + // Created actions + created := bulkResponse.Created() + if created == nil { + t.Fatal("expected created to be != nil; got nil") + } + if len(created) != 1 { + t.Fatalf("expected len(created) == %d; got %d", 1, len(created)) + } + if created[0].Id != "2" { + t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id) + } + if created[0].Status != 201 { + t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status) + } + + // Deleted actions + deleted := bulkResponse.Deleted() + if deleted == nil { + t.Fatal("expected deleted to be != nil; got nil") + } + if len(deleted) != 1 { + t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted)) + } + if deleted[0].Id != "1" { + t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id) + } + if deleted[0].Status != 200 { + t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status) + } + if !deleted[0].Found { + t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found) + } + + // Updated actions + updated := bulkResponse.Updated() + if updated == nil { + t.Fatal("expected updated to be != nil; got nil") + } + if len(updated) != 1 { + t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated)) + } + if updated[0].Id != "2" { + t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id) + } + if updated[0].Status != 200 { + t.Errorf("expected updated[0].Status == %d; 
got %d", 200, updated[0].Status) + } + if updated[0].Version != 2 { + t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version) + } + + // Succeeded actions + succeeded := bulkResponse.Succeeded() + if succeeded == nil { + t.Fatal("expected succeeded to be != nil; got nil") + } + if len(succeeded) != 4 { + t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded)) + } + + // ById + id1Results := bulkResponse.ById("1") + if id1Results == nil { + t.Fatal("expected id1Results to be != nil; got nil") + } + if len(id1Results) != 2 { + t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results)) + } + if id1Results[0].Id != "1" { + t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id) + } + if id1Results[0].Status != 201 { + t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status) + } + if id1Results[0].Version != 1 { + t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version) + } + if id1Results[1].Id != "1" { + t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id) + } + if id1Results[1].Status != 200 { + t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status) + } + if id1Results[1].Version != 2 { + t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version) + } +} + +func TestFailedBulkRequests(t *testing.T) { + js := `{ + "took" : 2, + "errors" : true, + "items" : [ { + "index" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "1", + "_version" : 1, + "status" : 201 + } + }, { + "create" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "2", + "_version" : 1, + "status" : 423, + "error" : { + "type":"routing_missing_exception", + "reason":"routing is required for [elastic-test2]/[comment]/[1]" + } + } + }, { + "delete" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "1", + "_version" : 2, + "status" : 404, + "found" : false + } + }, { + "update" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "2", + "_version" : 2, + "status" : 200 + } + } ] +}` + + var resp BulkResponse + err := json.Unmarshal([]byte(js), &resp) + if err != nil { + t.Fatal(err) + } + failed := resp.Failed() + if len(failed) != 2 { + t.Errorf("expected %d failed items; got: %d", 2, len(failed)) + } +} + +func TestBulkEstimatedSizeInBytes(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} + + index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) + index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) + delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). + Doc(struct { + Retweets int `json:"retweets"` + }{ + Retweets: 42, + }) + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + bulkRequest = bulkRequest.Add(update2Req) + + if bulkRequest.NumberOfActions() != 4 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions()) + } + + // The estimated size of the bulk request in bytes must be at least + // the length of the body request. 
+ raw, err := bulkRequest.bodyAsString() + if err != nil { + t.Fatal(err) + } + rawlen := int64(len([]byte(raw))) + + if got, want := bulkRequest.EstimatedSizeInBytes(), rawlen; got < want { + t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got) + } + + // Reset should also reset the calculated estimated byte size + bulkRequest.reset() + + if got, want := bulkRequest.EstimatedSizeInBytes(), int64(0); got != want { + t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request.go new file mode 100644 index 000000000..5adef7111 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request.go @@ -0,0 +1,219 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Bulk request to update document in Elasticsearch. +type BulkUpdateRequest struct { + BulkableRequest + index string + typ string + id string + + routing string + parent string + script *Script + version int64 // default is MATCH_ANY + versionType string // default is "internal" + retryOnConflict *int + refresh *bool + upsert interface{} + docAsUpsert *bool + doc interface{} + ttl int64 + timestamp string +} + +func NewBulkUpdateRequest() *BulkUpdateRequest { + return &BulkUpdateRequest{} +} + +func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest { + r.index = index + return r +} + +func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest { + r.typ = typ + return r +} + +func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest { + r.id = id + return r +} + +func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest { + r.routing = routing + return r +} + +func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest { + r.parent = parent + return r +} + +func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest { + r.script = script + return r +} + +func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest { + r.retryOnConflict = &retryOnConflict + return r +} + +func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest { + r.version = version + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". 
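+//
+// A sketch with illustrative values, using only the setters defined in
+// this file; with "external" versioning the caller supplies the version
+// number:
+//
+//	req := NewBulkUpdateRequest().Index("twitter").Type("tweet").Id("1").
+//		Version(42).VersionType("external").
+//		Doc(map[string]interface{}{"retweets": 42})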
+func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest { + r.versionType = versionType + return r +} + +func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest { + r.refresh = &refresh + return r +} + +func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest { + r.doc = doc + return r +} + +func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest { + r.docAsUpsert = &docAsUpsert + return r +} + +func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest { + r.upsert = doc + return r +} + +func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest { + r.ttl = ttl + return r +} + +func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest { + r.timestamp = timestamp + return r +} + +func (r *BulkUpdateRequest) String() string { + lines, err := r.Source() + if err == nil { + return strings.Join(lines, "\n") + } + return fmt.Sprintf("error: %v", err) +} + +func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) { + switch t := data.(type) { + default: + body, err := json.Marshal(data) + if err != nil { + return "", err + } + return string(body), nil + case json.RawMessage: + return string(t), nil + case *json.RawMessage: + return string(*t), nil + case string: + return t, nil + case *string: + return *t, nil + } +} + +func (r BulkUpdateRequest) Source() ([]string, error) { + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "doc" : { "field1" : "value1", ... } } + // or + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "script" : { ... } } + + lines := make([]string, 2) + + // "update" ... + command := make(map[string]interface{}) + updateCommand := make(map[string]interface{}) + if r.index != "" { + updateCommand["_index"] = r.index + } + if r.typ != "" { + updateCommand["_type"] = r.typ + } + if r.id != "" { + updateCommand["_id"] = r.id + } + if r.routing != "" { + updateCommand["_routing"] = r.routing + } + if r.parent != "" { + updateCommand["_parent"] = r.parent + } + if r.timestamp != "" { + updateCommand["_timestamp"] = r.timestamp + } + if r.ttl > 0 { + updateCommand["_ttl"] = r.ttl + } + if r.version > 0 { + updateCommand["_version"] = r.version + } + if r.versionType != "" { + updateCommand["_version_type"] = r.versionType + } + if r.refresh != nil { + updateCommand["refresh"] = *r.refresh + } + if r.retryOnConflict != nil { + updateCommand["_retry_on_conflict"] = *r.retryOnConflict + } + if r.upsert != nil { + updateCommand["upsert"] = r.upsert + } + command["update"] = updateCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // 2nd line: {"doc" : { ... 
}} or {"script": {...}} + source := make(map[string]interface{}) + if r.docAsUpsert != nil { + source["doc_as_upsert"] = *r.docAsUpsert + } + if r.doc != nil { + // {"doc":{...}} + source["doc"] = r.doc + } else if r.script != nil { + // {"script":...} + src, err := r.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + lines[1], err = r.getSourceAsString(source) + if err != nil { + return nil, err + } + + return lines, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request_test.go new file mode 100644 index 000000000..75c5b6d7f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request_test.go @@ -0,0 +1,77 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestBulkUpdateRequestSerialization(t *testing.T) { + tests := []struct { + Request BulkableRequest + Expected []string + }{ + // #0 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"doc":{"counter":42}}`, + }, + }, + // #1 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1"). + RetryOnConflict(3). + DocAsUpsert(true). + Doc(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`, + `{"doc":{"counter":42},"doc_as_upsert":true}`, + }, + }, + // #2 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1"). + RetryOnConflict(3). + Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)). + Upsert(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet","upsert":{"counter":42}}}`, + `{"script":{"inline":"ctx._source.retweets += param1","lang":"javascript","params":{"param1":42}}}`, + }, + }, + } + + for i, test := range tests { + lines, err := test.Request.Source() + if err != nil { + t.Fatalf("case #%d: expected no error, got: %v", i, err) + } + if lines == nil { + t.Fatalf("case #%d: expected lines, got nil", i) + } + if len(lines) != len(test.Expected) { + t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) + } + for j, line := range lines { + if line != test.Expected[j] { + t.Errorf("case #%d: expected line #%d to be\n%s\nbut got:\n%s", i, j, test.Expected[j], line) + } + } + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize.go b/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize.go new file mode 100644 index 000000000..645930859 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize.go @@ -0,0 +1,28 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "net/url" + +// canonicalize takes a list of URLs and returns its canonicalized form, i.e. 
+// remove anything but scheme, userinfo, host, and port. It also removes the +// slash at the end. It also skips invalid URLs or URLs that do not use +// protocol http or https. +// +// Example: +// http://127.0.0.1:9200/path?query=1 -> http://127.0.0.1:9200 +func canonicalize(rawurls ...string) []string { + canonicalized := make([]string, 0) + for _, rawurl := range rawurls { + u, err := url.Parse(rawurl) + if err == nil && (u.Scheme == "http" || u.Scheme == "https") { + u.Fragment = "" + u.Path = "" + u.RawQuery = "" + canonicalized = append(canonicalized, u.String()) + } + } + return canonicalized +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize_test.go new file mode 100644 index 000000000..ada2ff22d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize_test.go @@ -0,0 +1,41 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "reflect" + "testing" +) + +func TestCanonicalize(t *testing.T) { + tests := []struct { + Input []string + Output []string + }{ + { + Input: []string{"http://127.0.0.1/"}, + Output: []string{"http://127.0.0.1"}, + }, + { + Input: []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"}, + Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"}, + }, + { + Input: []string{"http://user:secret@127.0.0.1/path?query=1#fragment"}, + Output: []string{"http://user:secret@127.0.0.1"}, + }, + { + Input: []string{"https://somewhere.on.mars:9999/path?query=1#fragment"}, + Output: []string{"https://somewhere.on.mars:9999"}, + }, + } + + for _, test := range tests { + got := canonicalize(test.Input...) + if !reflect.DeepEqual(got, test.Output) { + t.Errorf("expected %v; got: %v", test.Output, got) + } + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll.go b/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll.go new file mode 100644 index 000000000..c57093267 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll.go @@ -0,0 +1,102 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" +) + +// ClearScrollService clears one or more scroll contexts by their ids. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api +// for details. +type ClearScrollService struct { + client *Client + pretty bool + scrollId []string +} + +// NewClearScrollService creates a new ClearScrollService. +func NewClearScrollService(client *Client) *ClearScrollService { + return &ClearScrollService{ + client: client, + scrollId: make([]string, 0), + } +} + +// ScrollId is a list of scroll IDs to clear. +// Use _all to clear all search contexts. +func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService { + s.scrollId = append(s.scrollId, scrollIds...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
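+//
+// Illustrative use on a clear-scroll call (scrollId is assumed to come
+// from a prior scroll request, as in the tests below):
+//
+//	res, err := client.ClearScroll().ScrollId(scrollId).Pretty(true).Do()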
+func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ClearScrollService) buildURL() (string, url.Values, error) { + // Build URL + path := "/_search/scroll/" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClearScrollService) Validate() error { + var invalid []string + if len(s.scrollId) == 0 { + invalid = append(invalid, "ScrollId") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ClearScrollService) Do() (*ClearScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body := strings.Join(s.scrollId, ",") + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClearScrollResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClearScrollResponse is the response of ClearScrollService.Do. +type ClearScrollResponse struct { +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll_test.go new file mode 100644 index 000000000..bbb659df9 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll_test.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + _ "net/http" + "testing" +) + +func TestClearScroll(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + res, err := client.Scroll(testIndexName).Size(1).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Errorf("expected results != nil; got nil") + } + if res.ScrollId == "" { + t.Errorf("expected scrollId in results; got %q", res.ScrollId) + } + + // Search should succeed + _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do() + if err != nil { + t.Fatal(err) + } + + // Clear scroll id + clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do() + if err != nil { + t.Fatal(err) + } + if clearScrollRes == nil { + t.Error("expected results != nil; got nil") + } + + // Search result should fail + _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do() + if err == nil { + t.Fatalf("expected scroll to fail") + } +} + +func TestClearScrollValidate(t *testing.T) { + client := setupTestClient(t) + + // No scroll id -> fail with error + res, err := NewClearScrollService(client).Do() + if err == nil { + t.Fatalf("expected ClearScroll to fail without scroll ids") + } + if res != nil { + t.Fatalf("expected result to be nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/client.go b/services/templeton/vendor/src/github.com/olivere/elastic/client.go new file mode 100644 index 000000000..556d2867f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/client.go @@ -0,0 +1,1551 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math/rand" + "net/http" + "net/http/httputil" + "net/url" + "regexp" + "strings" + "sync" + "time" +) + +const ( + // Version is the current version of Elastic. + Version = "3.0.21" + + // DefaultUrl is the default endpoint of Elasticsearch on the local machine. + // It is used e.g. when initializing a new Client without a specific URL. + DefaultURL = "http://127.0.0.1:9200" + + // DefaultScheme is the default protocol scheme to use when sniffing + // the Elasticsearch cluster. + DefaultScheme = "http" + + // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default. + DefaultHealthcheckEnabled = true + + // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits + // for a response from Elasticsearch on startup, i.e. when creating a + // client. After the client is started, a shorter timeout is commonly used + // (its default is specified in DefaultHealthcheckTimeout). 
+ DefaultHealthcheckTimeoutStartup = 5 * time.Second + + // DefaultHealthcheckTimeout specifies the time a running client waits for + // a response from Elasticsearch. Notice that the healthcheck timeout + // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup). + DefaultHealthcheckTimeout = 1 * time.Second + + // DefaultHealthcheckInterval is the default interval between + // two health checks of the nodes in the cluster. + DefaultHealthcheckInterval = 60 * time.Second + + // DefaultSnifferEnabled specifies if the sniffer is enabled by default. + DefaultSnifferEnabled = true + + // DefaultSnifferInterval is the interval between two sniffing procedures, + // i.e. the lookup of all nodes in the cluster and their addition/removal + // from the list of actual connections. + DefaultSnifferInterval = 15 * time.Minute + + // DefaultSnifferTimeoutStartup is the default timeout for the sniffing + // process that is initiated while creating a new client. For subsequent + // sniffing processes, DefaultSnifferTimeout is used (by default). + DefaultSnifferTimeoutStartup = 5 * time.Second + + // DefaultSnifferTimeout is the default timeout after which the + // sniffing process times out. Notice that for the initial sniffing + // process, DefaultSnifferTimeoutStartup is used. + DefaultSnifferTimeout = 2 * time.Second + + // DefaultMaxRetries is the number of retries for a single request after + // Elastic will give up and return an error. It is zero by default, so + // retry is disabled by default. + DefaultMaxRetries = 0 + + // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending + // a GET request with a body. + DefaultSendGetBodyAs = "GET" + + // DefaultGzipEnabled specifies if gzip compression is enabled by default. + DefaultGzipEnabled = false + + // off is used to disable timeouts. + off = -1 * time.Second +) + +var ( + // ErrNoClient is raised when no Elasticsearch node is available. + ErrNoClient = errors.New("no Elasticsearch node available") + + // ErrRetry is raised when a request cannot be executed after the configured + // number of retries. + ErrRetry = errors.New("cannot connect after several retries") + + // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus + // didn't return in time. + ErrTimeout = errors.New("timeout") +) + +// ClientOptionFunc is a function that configures a Client. +// It is used in NewClient. +type ClientOptionFunc func(*Client) error + +// Client is an Elasticsearch client. Create one by calling NewClient. +type Client struct { + c *http.Client // net/http Client to use for requests + + connsMu sync.RWMutex // connsMu guards the next block + conns []*conn // all connections + cindex int // index into conns + + mu sync.RWMutex // guards the next block + urls []string // set of URLs passed initially to the client + running bool // true if the client's background processes are running + errorlog Logger // error log for critical messages + infolog Logger // information log for e.g. response times + tracelog Logger // trace log for debugging + maxRetries int // max. 
number of retries + scheme string // http or https + healthcheckEnabled bool // healthchecks enabled or disabled + healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup + healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch + healthcheckInterval time.Duration // interval between healthchecks + healthcheckStop chan bool // notify healthchecker to stop, and notify back + snifferEnabled bool // sniffer enabled or disabled + snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup + snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API + snifferInterval time.Duration // interval between sniffing + snifferStop chan bool // notify sniffer to stop, and notify back + decoder Decoder // used to decode data sent from Elasticsearch + basicAuth bool // indicates whether to send HTTP Basic Auth credentials + basicAuthUsername string // username for HTTP Basic Auth + basicAuthPassword string // password for HTTP Basic Auth + sendGetBodyAs string // override for when sending a GET with a body + requiredPlugins []string // list of required plugins + gzipEnabled bool // gzip compression enabled or disabled (default) +} + +// NewClient creates a new client to work with Elasticsearch. +// +// NewClient, by default, is meant to be long-lived and shared across +// your application. If you need a short-lived client, e.g. for request-scope, +// consider using NewSimpleClient instead. +// +// The caller can configure the new client by passing configuration options +// to the func. +// +// Example: +// +// client, err := elastic.NewClient( +// elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"), +// elastic.SetMaxRetries(10), +// elastic.SetBasicAuth("user", "secret")) +// +// If no URL is configured, Elastic uses DefaultURL by default. +// +// If the sniffer is enabled (the default), the new client then sniffes +// the cluster via the Nodes Info API +// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info). +// It uses the URLs specified by the caller. The caller is responsible +// to only pass a list of URLs of nodes that belong to the same cluster. +// This sniffing process is run on startup and periodically. +// Use SnifferInterval to set the interval between two sniffs (default is +// 15 minutes). In other words: By default, the client will find new nodes +// in the cluster and remove those that are no longer available every +// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient. +// +// The list of nodes found in the sniffing process will be used to make +// connections to the REST API of Elasticsearch. These nodes are also +// periodically checked in a shorter time frame. This process is called +// a health check. By default, a health check is done every 60 seconds. +// You can set a shorter or longer interval by SetHealthcheckInterval. +// Disabling health checks is not recommended, but can be done by +// SetHealthcheck(false). +// +// Connections are automatically marked as dead or healthy while +// making requests to Elasticsearch. When a request fails, Elastic will +// retry up to a maximum number of retries configured with SetMaxRetries. +// Retries are disabled by default. +// +// If no HttpClient is configured, then http.DefaultClient is used. 
+// You can use your own http.Client with some http.Transport for +// advanced scenarios. +// +// An error is also returned when some configuration option is invalid or +// the new client cannot sniff the cluster (if enabled). +func NewClient(options ...ClientOptionFunc) (*Client, error) { + // Set up the client + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + maxRetries: DefaultMaxRetries, + healthcheckEnabled: DefaultHealthcheckEnabled, + healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup, + healthcheckTimeout: DefaultHealthcheckTimeout, + healthcheckInterval: DefaultHealthcheckInterval, + healthcheckStop: make(chan bool), + snifferEnabled: DefaultSnifferEnabled, + snifferTimeoutStartup: DefaultSnifferTimeoutStartup, + snifferTimeout: DefaultSnifferTimeout, + snifferInterval: DefaultSnifferInterval, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + // Check if we can make a request to any of the specified URLs + if c.healthcheckEnabled { + if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil { + return nil, err + } + } + + if c.snifferEnabled { + // Sniff the cluster initially + if err := c.sniff(c.snifferTimeoutStartup); err != nil { + return nil, err + } + } else { + // Do not sniff the cluster initially. Use the provided URLs instead. + for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + } + + if c.healthcheckEnabled { + // Perform an initial health check + c.healthcheck(c.healthcheckTimeoutStartup, true) + } + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + if c.snifferEnabled { + go c.sniffer() // periodically update cluster information + } + if c.healthcheckEnabled { + go c.healthchecker() // start goroutine periodically ping all nodes of the cluster + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// NewSimpleClient creates a new short-lived Client that can be used in +// use cases where you need e.g. one client per request. +// +// While NewClient by default sets up e.g. periodic health checks +// and sniffing for new nodes in separate goroutines, NewSimpleClient does +// not and is meant as a simple replacement where you don't need all the +// heavy lifting of NewClient. +// +// NewSimpleClient does the following by default: First, all health checks +// are disabled, including timeouts and periodic checks. Second, sniffing +// is disabled, including timeouts and periodic checks. The number of retries +// is set to 1. NewSimpleClient also does not start any goroutines. +// +// Notice that you can still override settings by passing additional options, +// just like with NewClient. 
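+//
+// Example (mirroring the NewClient example above):
+//
+//	client, err := elastic.NewSimpleClient(elastic.SetURL("http://127.0.0.1:9200"))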
+func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) { + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + maxRetries: 1, + healthcheckEnabled: false, + healthcheckTimeoutStartup: off, + healthcheckTimeout: off, + healthcheckInterval: off, + healthcheckStop: make(chan bool), + snifferEnabled: false, + snifferTimeoutStartup: off, + snifferTimeout: off, + snifferInterval: off, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// SetHttpClient can be used to specify the http.Client to use when making +// HTTP requests to Elasticsearch. +func SetHttpClient(httpClient *http.Client) ClientOptionFunc { + return func(c *Client) error { + if httpClient != nil { + c.c = httpClient + } else { + c.c = http.DefaultClient + } + return nil + } +} + +// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to +// use when making HTTP requests to Elasticsearch. +func SetBasicAuth(username, password string) ClientOptionFunc { + return func(c *Client) error { + c.basicAuthUsername = username + c.basicAuthPassword = password + c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != "" + return nil + } +} + +// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that +// when sniffing is enabled, these URLs are used to initially sniff the +// cluster on startup. +func SetURL(urls ...string) ClientOptionFunc { + return func(c *Client) error { + switch len(urls) { + case 0: + c.urls = []string{DefaultURL} + default: + c.urls = urls + } + return nil + } +} + +// SetScheme sets the HTTP scheme to look for when sniffing (http or https). +// This is http by default. +func SetScheme(scheme string) ClientOptionFunc { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// SetSniff enables or disables the sniffer (enabled by default). +func SetSniff(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.snifferEnabled = enabled + return nil + } +} + +// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used +// when creating a new client. The default is 5 seconds. Notice that the +// timeout being used for subsequent sniffing processes is set with +// SetSnifferTimeout. +func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeoutStartup = timeout + return nil + } +} + +// SetSnifferTimeout sets the timeout for the sniffer that finds the +// nodes in a cluster. The default is 2 seconds. Notice that the timeout +// used when creating a new client on startup is usually greater and can +// be set with SetSnifferTimeoutStartup. 
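+//
+// Example with illustrative values:
+//
+//	client, err := elastic.NewClient(
+//		elastic.SetSnifferTimeoutStartup(10*time.Second),
+//		elastic.SetSnifferTimeout(4*time.Second))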
+func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeout = timeout + return nil + } +} + +// SetSnifferInterval sets the interval between two sniffing processes. +// The default interval is 15 minutes. +func SetSnifferInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferInterval = interval + return nil + } +} + +// SetHealthcheck enables or disables healthchecks (enabled by default). +func SetHealthcheck(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckEnabled = enabled + return nil + } +} + +// SetHealthcheckTimeoutStartup sets the timeout for the initial health check. +// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup). +// Notice that timeouts for subsequent health checks can be modified with +// SetHealthcheckTimeout. +func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeoutStartup = timeout + return nil + } +} + +// SetHealthcheckTimeout sets the timeout for periodic health checks. +// The default timeout is 1 second (see DefaultHealthcheckTimeout). +// Notice that a different (usually larger) timeout is used for the initial +// healthcheck, which is initiated while creating a new client. +// The startup timeout can be modified with SetHealthcheckTimeoutStartup. +func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeout = timeout + return nil + } +} + +// SetHealthcheckInterval sets the interval between two health checks. +// The default interval is 60 seconds. +func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckInterval = interval + return nil + } +} + +// SetMaxRetries sets the maximum number of retries before giving up when +// performing a HTTP request to Elasticsearch. +func SetMaxRetries(maxRetries int) ClientOptionFunc { + return func(c *Client) error { + if maxRetries < 0 { + return errors.New("MaxRetries must be greater than or equal to 0") + } + c.maxRetries = maxRetries + return nil + } +} + +// SetGzip enables or disables gzip compression (disabled by default). +func SetGzip(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.gzipEnabled = enabled + return nil + } +} + +// SetDecoder sets the Decoder to use when decoding data from Elasticsearch. +// DefaultDecoder is used by default. +func SetDecoder(decoder Decoder) ClientOptionFunc { + return func(c *Client) error { + if decoder != nil { + c.decoder = decoder + } else { + c.decoder = &DefaultDecoder{} + } + return nil + } +} + +// SetRequiredPlugins can be used to indicate that some plugins are required +// before a Client will be created. +func SetRequiredPlugins(plugins ...string) ClientOptionFunc { + return func(c *Client) error { + if c.requiredPlugins == nil { + c.requiredPlugins = make([]string, 0) + } + c.requiredPlugins = append(c.requiredPlugins, plugins...) + return nil + } +} + +// SetErrorLog sets the logger for critical messages like nodes joining +// or leaving the cluster or failing requests. It is nil by default. +func SetErrorLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.errorlog = logger + return nil + } +} + +// SetInfoLog sets the logger for informational messages, e.g. requests +// and their response times. It is nil by default. 
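+//
+// A sketch wiring it to the standard library's log package (prefix and
+// flags are arbitrary choices):
+//
+//	logger := log.New(os.Stdout, "ELASTIC ", log.LstdFlags)
+//	client, err := NewClient(SetInfoLog(logger))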
+func SetInfoLog(logger Logger) ClientOptionFunc {
+	return func(c *Client) error {
+		c.infolog = logger
+		return nil
+	}
+}
+
+// SetTraceLog specifies the log.Logger to use for output of HTTP requests
+// and responses, which is helpful during debugging. It is nil by default.
+func SetTraceLog(logger Logger) ClientOptionFunc {
+	return func(c *Client) error {
+		c.tracelog = logger
+		return nil
+	}
+}
+
+// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request
+// with a body. It is GET by default.
+func SetSendGetBodyAs(httpMethod string) ClientOptionFunc {
+	return func(c *Client) error {
+		c.sendGetBodyAs = httpMethod
+		return nil
+	}
+}
+
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+	c.connsMu.Lock()
+	conns := c.conns
+	c.connsMu.Unlock()
+
+	var buf bytes.Buffer
+	for i, conn := range conns {
+		if i > 0 {
+			buf.WriteString(", ")
+		}
+		buf.WriteString(conn.String())
+	}
+	return buf.String()
+}
+
+// IsRunning returns true if the background processes of the client are
+// running, false otherwise.
+func (c *Client) IsRunning() bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.running
+}
+
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+//
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+	c.mu.RLock()
+	if c.running {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	if c.snifferEnabled {
+		go c.sniffer()
+	}
+	if c.healthcheckEnabled {
+		go c.healthchecker()
+	}
+
+	c.mu.Lock()
+	c.running = true
+	c.mu.Unlock()
+
+	c.infof("elastic: client started")
+}
+
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+//
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() {
+	c.mu.RLock()
+	if !c.running {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	if c.healthcheckEnabled {
+		c.healthcheckStop <- true
+		<-c.healthcheckStop
+	}
+
+	if c.snifferEnabled {
+		c.snifferStop <- true
+		<-c.snifferStop
+	}
+
+	c.mu.Lock()
+	c.running = false
+	c.mu.Unlock()
+
+	c.infof("elastic: client stopped")
+}
+
+// errorf logs to the error log.
+func (c *Client) errorf(format string, args ...interface{}) {
+	if c.errorlog != nil {
+		c.errorlog.Printf(format, args...)
+	}
+}
+
+// infof logs informational messages.
+func (c *Client) infof(format string, args ...interface{}) {
+	if c.infolog != nil {
+		c.infolog.Printf(format, args...)
+	}
+}
+
+// tracef logs to the trace log.
+func (c *Client) tracef(format string, args ...interface{}) {
+	if c.tracelog != nil {
+		c.tracelog.Printf(format, args...)
+	}
+}
+
+// dumpRequest dumps the given HTTP request to the trace log.
+func (c *Client) dumpRequest(r *http.Request) {
+	if c.tracelog != nil {
+		out, err := httputil.DumpRequestOut(r, true)
+		if err == nil {
+			c.tracef("%s\n", string(out))
+		}
+	}
+}
+
+// dumpResponse dumps the given HTTP response to the trace log.
+func (c *Client) dumpResponse(resp *http.Response) {
+	if c.tracelog != nil {
+		out, err := httputil.DumpResponse(resp, true)
+		if err == nil {
+			c.tracef("%s\n", string(out))
+		}
+	}
+}
+
+// sniffer periodically runs sniff.
+func (c *Client) sniffer() { + for { + c.mu.RLock() + timeout := c.snifferTimeout + ticker := time.After(c.snifferInterval) + c.mu.RUnlock() + + select { + case <-c.snifferStop: + // we are asked to stop, so we signal back that we're stopping now + c.snifferStop <- true + return + case <-ticker: + c.sniff(timeout) + } + } +} + +// sniff uses the Node Info API to return the list of nodes in the cluster. +// It uses the list of URLs passed on startup plus the list of URLs found +// by the preceding sniffing process (if sniffing is enabled). +// +// If sniffing is disabled, this is a no-op. +func (c *Client) sniff(timeout time.Duration) error { + c.mu.RLock() + if !c.snifferEnabled { + c.mu.RUnlock() + return nil + } + + // Use all available URLs provided to sniff the cluster. + urlsMap := make(map[string]bool) + urls := make([]string, 0) + + // Add all URLs provided on startup + for _, url := range c.urls { + urlsMap[url] = true + urls = append(urls, url) + } + c.mu.RUnlock() + + // Add all URLs found by sniffing + c.connsMu.RLock() + for _, conn := range c.conns { + if !conn.IsDead() { + url := conn.URL() + if _, found := urlsMap[url]; !found { + urls = append(urls, url) + } + } + } + c.connsMu.RUnlock() + + if len(urls) == 0 { + return ErrNoClient + } + + // Start sniffing on all found URLs + ch := make(chan []*conn, len(urls)) + for _, url := range urls { + go func(url string) { ch <- c.sniffNode(url) }(url) + } + + // Wait for the results to come back, or the process times out. + for { + select { + case conns := <-ch: + if len(conns) > 0 { + c.updateConns(conns) + return nil + } + case <-time.After(timeout): + // We get here if no cluster responds in time + return ErrNoClient + } + } +} + +// reSniffHostAndPort is used to extract hostname and port from a result +// from a Nodes Info API (example: "inet[/127.0.0.1:9200]"). +var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`) + +// sniffNode sniffs a single node. This method is run as a goroutine +// in sniff. If successful, it returns the list of node URLs extracted +// from the result of calling Nodes Info API. Otherwise, an empty array +// is returned. 
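+//
+// For reference: Elasticsearch 1.x reports HTTP addresses in the form
+// "inet[/127.0.0.1:9200]" (which reSniffHostAndPort above takes apart),
+// while other versions may report a plain "host:port"; both shapes are
+// handled below.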
+func (c *Client) sniffNode(url string) []*conn {
+	nodes := make([]*conn, 0)
+
+	// Call the Nodes Info API at /_nodes/http
+	req, err := NewRequest("GET", url+"/_nodes/http")
+	if err != nil {
+		return nodes
+	}
+
+	c.mu.RLock()
+	if c.basicAuth {
+		req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword)
+	}
+	c.mu.RUnlock()
+
+	res, err := c.c.Do((*http.Request)(req))
+	if err != nil {
+		return nodes
+	}
+	if res == nil {
+		return nodes
+	}
+
+	if res.Body != nil {
+		defer res.Body.Close()
+	}
+
+	var info NodesInfoResponse
+	if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
+		if len(info.Nodes) > 0 {
+			switch c.scheme {
+			case "https":
+				for nodeID, node := range info.Nodes {
+					if strings.HasPrefix(node.HTTPSAddress, "inet") {
+						m := reSniffHostAndPort.FindStringSubmatch(node.HTTPSAddress)
+						if len(m) == 3 {
+							url := fmt.Sprintf("https://%s:%s", m[1], m[2])
+							nodes = append(nodes, newConn(nodeID, url))
+						}
+					} else {
+						url := fmt.Sprintf("https://%s", node.HTTPSAddress)
+						nodes = append(nodes, newConn(nodeID, url))
+					}
+				}
+			default:
+				for nodeID, node := range info.Nodes {
+					if strings.HasPrefix(node.HTTPAddress, "inet") {
+						m := reSniffHostAndPort.FindStringSubmatch(node.HTTPAddress)
+						if len(m) == 3 {
+							url := fmt.Sprintf("http://%s:%s", m[1], m[2])
+							nodes = append(nodes, newConn(nodeID, url))
+						}
+					} else {
+						url := fmt.Sprintf("http://%s", node.HTTPAddress)
+						nodes = append(nodes, newConn(nodeID, url))
+					}
+				}
+			}
+		}
+	}
+	return nodes
+}
+
+// updateConns updates the client's connections with new information
+// gathered by a sniff operation.
+func (c *Client) updateConns(conns []*conn) {
+	c.connsMu.Lock()
+
+	newConns := make([]*conn, 0)
+
+	// Build up new connections:
+	// If we find an existing connection, use that (including no. of failures etc.).
+	// If we find a new connection, add it.
+	for _, conn := range conns {
+		var found bool
+		for _, oldConn := range c.conns {
+			if oldConn.NodeID() == conn.NodeID() {
+				// Take over the old connection
+				newConns = append(newConns, oldConn)
+				found = true
+				break
+			}
+		}
+		if !found {
+			// New connection didn't exist, so add it to our list of new conns.
+			c.errorf("elastic: %s joined the cluster", conn.URL())
+			newConns = append(newConns, conn)
+		}
+	}
+
+	c.conns = newConns
+	c.cindex = -1
+	c.connsMu.Unlock()
+}
+
+// healthchecker periodically runs healthcheck.
+func (c *Client) healthchecker() {
+	for {
+		c.mu.RLock()
+		timeout := c.healthcheckTimeout
+		ticker := time.After(c.healthcheckInterval)
+		c.mu.RUnlock()
+
+		select {
+		case <-c.healthcheckStop:
+			// we are asked to stop, so we signal back that we're stopping now
+			c.healthcheckStop <- true
+			return
+		case <-ticker:
+			c.healthcheck(timeout, false)
+		}
+	}
+}
+
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks connections as dead, sets them alive etc.
+// If healthchecks are disabled and force is false, this is a no-op.
+// The timeout specifies how long to wait for a response from Elasticsearch.
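+//
+// The interval and timeout used here are configured via the
+// SetHealthcheck* options, e.g. (durations are illustrative):
+//
+//	client, err := NewClient(
+//		SetHealthcheckInterval(30*time.Second),
+//		SetHealthcheckTimeout(2*time.Second),
+//	)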
+func (c *Client) healthcheck(timeout time.Duration, force bool) { + c.mu.RLock() + if !c.healthcheckEnabled && !force { + c.mu.RUnlock() + return + } + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + c.mu.RUnlock() + + c.connsMu.RLock() + conns := c.conns + c.connsMu.RUnlock() + + timeoutInMillis := int64(timeout / time.Millisecond) + + for _, conn := range conns { + params := make(url.Values) + params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis)) + req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode()) + if err == nil { + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + res, err := c.c.Do((*http.Request)(req)) + if err == nil { + if res.Body != nil { + defer res.Body.Close() + } + if res.StatusCode >= 200 && res.StatusCode < 300 { + conn.MarkAsAlive() + } else { + conn.MarkAsDead() + c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode) + } + } else { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + } + } else { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + } + } +} + +// startupHealthcheck is used at startup to check if the server is available +// at all. +func (c *Client) startupHealthcheck(timeout time.Duration) error { + c.mu.Lock() + urls := c.urls + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + c.mu.Unlock() + + // If we don't get a connection after "timeout", we bail. + start := time.Now() + for { + cl := &http.Client{Timeout: timeout} + for _, url := range urls { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return err + } + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + res, err := cl.Do(req) + if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 { + return nil + } + } + time.Sleep(1 * time.Second) + if time.Now().Sub(start) > timeout { + break + } + } + return ErrNoClient +} + +// next returns the next available connection, or ErrNoClient. +func (c *Client) next() (*conn, error) { + // We do round-robin here. + // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients. + c.connsMu.Lock() + defer c.connsMu.Unlock() + + i := 0 + numConns := len(c.conns) + for { + i += 1 + if i > numConns { + break // we visited all conns: they all seem to be dead + } + c.cindex += 1 + if c.cindex >= numConns { + c.cindex = 0 + } + conn := c.conns[c.cindex] + if !conn.IsDead() { + return conn, nil + } + } + + // We have a deadlock here: All nodes are marked as dead. + // If sniffing is disabled, connections will never be marked alive again. + // So we are marking them as alive--if sniffing is disabled. + // They'll then be picked up in the next call to PerformRequest. + if !c.snifferEnabled { + c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns)) + for _, conn := range c.conns { + conn.MarkAsAlive() + } + } + + // We tried hard, but there is no node available + return nil, ErrNoClient +} + +// mustActiveConn returns nil if there is an active connection, +// otherwise ErrNoClient is returned. +func (c *Client) mustActiveConn() error { + c.connsMu.Lock() + defer c.connsMu.Unlock() + + for _, c := range c.conns { + if !c.IsDead() { + return nil + } + } + return ErrNoClient +} + +// PerformRequest does a HTTP request to Elasticsearch. +// It returns a response and an error on failure. 
+//
+// Optionally, a list of HTTP error codes to ignore can be passed.
+// This is necessary for services that expect e.g. HTTP status 404 as a
+// valid outcome (Exists, IndicesExists, IndicesTypeExists).
+func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) {
+	start := time.Now().UTC()
+
+	c.mu.RLock()
+	timeout := c.healthcheckTimeout
+	retries := c.maxRetries
+	basicAuth := c.basicAuth
+	basicAuthUsername := c.basicAuthUsername
+	basicAuthPassword := c.basicAuthPassword
+	sendGetBodyAs := c.sendGetBodyAs
+	gzipEnabled := c.gzipEnabled
+	c.mu.RUnlock()
+
+	var err error
+	var conn *conn
+	var req *Request
+	var resp *Response
+	var retried bool
+
+	// We wait between retries, using simple exponential back-off.
+	// TODO: Make this configurable, including the jitter.
+	retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+
+	// Change method if sendGetBodyAs is specified.
+	if method == "GET" && body != nil && sendGetBodyAs != "GET" {
+		method = sendGetBodyAs
+	}
+
+	for {
+		pathWithParams := path
+		if len(params) > 0 {
+			pathWithParams += "?" + params.Encode()
+		}
+
+		// Get a connection
+		conn, err = c.next()
+		if err == ErrNoClient {
+			if !retried {
+				// Force a healthcheck as all connections seem to be dead.
+				c.healthcheck(timeout, false)
+			}
+			retries -= 1
+			if retries <= 0 {
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if err != nil {
+			c.errorf("elastic: cannot get connection from pool")
+			return nil, err
+		}
+
+		req, err = NewRequest(method, conn.URL()+pathWithParams)
+		if err != nil {
+			c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+			return nil, err
+		}
+
+		if basicAuth {
+			req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+		}
+
+		// Set body
+		if body != nil {
+			err = req.SetBody(body, gzipEnabled)
+			if err != nil {
+				c.errorf("elastic: couldn't set body %+v for request: %v", body, err)
+				return nil, err
+			}
+		}
+
+		// Tracing
+		c.dumpRequest((*http.Request)(req))
+
+		// Get response
+		res, err := c.c.Do((*http.Request)(req))
+		if err != nil {
+			retries -= 1
+			if retries <= 0 {
+				c.errorf("elastic: %s is dead", conn.URL())
+				conn.MarkAsDead()
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if res.Body != nil {
+			defer res.Body.Close()
+		}
+
+		// Check for errors
+		if err := checkResponse((*http.Request)(req), res, ignoreErrors...); err != nil {
+			// No retry: the request itself succeeded; an error status
+			// from Elasticsearch is returned as-is.
+			return nil, err
+		}
+
+		// Tracing
+		c.dumpResponse(res)
+
+		// We successfully made a request with this connection
+		conn.MarkAsHealthy()
+
+		resp, err = c.newResponse(res)
+		if err != nil {
+			return nil, err
+		}
+
+		break
+	}
+
+	duration := time.Now().UTC().Sub(start)
+	c.infof("%s %s [status:%d, request:%.3fs]",
+		strings.ToUpper(method),
+		req.URL,
+		resp.StatusCode,
+		float64(int64(duration/time.Millisecond))/1000)
+
+	return resp, nil
+}
+
+// -- Document APIs --
+
+// Index a document.
+func (c *Client) Index() *IndexService {
+	return NewIndexService(c)
+}
+
+// Get a document.
+func (c *Client) Get() *GetService {
+	return NewGetService(c)
+}
+
+// MultiGet retrieves multiple documents in one roundtrip.
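+//
+// Sketch, assuming the package's MultiGetItem helper (index, type and
+// ids are placeholders):
+//
+//	res, err := client.MultiGet().
+//		Add(NewMultiGetItem().Index("twitter").Type("tweet").Id("1")).
+//		Add(NewMultiGetItem().Index("twitter").Type("tweet").Id("2")).
+//		Do()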
+func (c *Client) MultiGet() *MgetService {
+	return NewMgetService(c)
+}
+
+// Mget retrieves multiple documents in one roundtrip.
+func (c *Client) Mget() *MgetService {
+	return NewMgetService(c)
+}
+
+// Delete a document.
+func (c *Client) Delete() *DeleteService {
+	return NewDeleteService(c)
+}
+
+// DeleteByQuery deletes documents as found by a query.
+func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService {
+	return NewDeleteByQueryService(c).Index(indices...)
+}
+
+// Update a document.
+func (c *Client) Update() *UpdateService {
+	return NewUpdateService(c)
+}
+
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+	return NewBulkService(c)
+}
+
+// BulkProcessor allows setting up a concurrent processor of bulk requests.
+func (c *Client) BulkProcessor() *BulkProcessorService {
+	return NewBulkProcessorService(c)
+}
+
+// TODO Term Vectors
+// TODO Multi termvectors API
+
+// -- Search APIs --
+
+// Search is the entry point for searches.
+func (c *Client) Search(indices ...string) *SearchService {
+	return NewSearchService(c).Index(indices...)
+}
+
+// Suggest returns a service to return suggestions.
+func (c *Client) Suggest(indices ...string) *SuggestService {
+	return NewSuggestService(c).Index(indices...)
+}
+
+// MultiSearch is the entry point for multi searches.
+func (c *Client) MultiSearch() *MultiSearchService {
+	return NewMultiSearchService(c)
+}
+
+// Count documents.
+func (c *Client) Count(indices ...string) *CountService {
+	return NewCountService(c).Index(indices...)
+}
+
+// Explain computes a score explanation for a query and a specific document.
+func (c *Client) Explain(index, typ, id string) *ExplainService {
+	return NewExplainService(c).Index(index).Type(typ).Id(id)
+}
+
+// Percolate allows you to send a document and return matching queries.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html.
+func (c *Client) Percolate() *PercolateService {
+	return NewPercolateService(c)
+}
+
+// TODO Search Template
+// TODO Search Shards API
+// TODO Search Exists API
+// TODO Validate API
+// TODO Field Stats API
+
+// Exists checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+	return NewExistsService(c)
+}
+
+// Scan through documents. Use this to iterate inside a server process
+// where the results will be processed without returning them to a client.
+func (c *Client) Scan(indices ...string) *ScanService {
+	return NewScanService(c).Index(indices...)
+}
+
+// Scroll through documents. Use this to efficiently scroll through results
+// while returning the results to a client. Use Scan when you don't need
+// to return results to a client (i.e. not paginating via request/response).
+func (c *Client) Scroll(indices ...string) *ScrollService {
+	return NewScrollService(c).Index(indices...)
+}
+
+// ClearScroll can be used to clear search contexts manually.
+func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService {
+	return NewClearScrollService(c).ScrollId(scrollIds...)
+}
+
+// -- Indices APIs --
+
+// CreateIndex returns a service to create a new index.
+func (c *Client) CreateIndex(name string) *IndicesCreateService {
+	return NewIndicesCreateService(c).Index(name)
+}
+
+// DeleteIndex returns a service to delete an index.
+func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService {
+	return NewIndicesDeleteService(c).Index(indices)
+}
+
+// IndexExists checks whether an index exists.
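+//
+// Sketch (the index name is a placeholder):
+//
+//	exists, err := client.IndexExists("twitter").Do()
+//	if err == nil && !exists {
+//		// index is missing; create it, etc.
+//	}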
+func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
+	return NewIndicesExistsService(c).Index(indices)
+}
+
+// TypeExists checks whether one or more types exist in one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+	return NewIndicesExistsTypeService(c)
+}
+
+// IndexStats provides statistics on different operations happening
+// in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+	return NewIndicesStatsService(c).Index(indices...)
+}
+
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *IndicesOpenService {
+	return NewIndicesOpenService(c).Index(name)
+}
+
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *IndicesCloseService {
+	return NewIndicesCloseService(c).Index(name)
+}
+
+// IndexGet retrieves information about one or more indices.
+// IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet(indices ...string) *IndicesGetService {
+	return NewIndicesGetService(c).Index(indices...)
+}
+
+// IndexGetSettings retrieves settings of all, one or more indices.
+func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService {
+	return NewIndicesGetSettingsService(c).Index(indices...)
+}
+
+// IndexPutSettings sets settings for all, one or more indices.
+func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService {
+	return NewIndicesPutSettingsService(c).Index(indices...)
+}
+
+// Optimize asks Elasticsearch to optimize one or more indices.
+// Optimize is deprecated as of Elasticsearch 2.1 and replaced by Forcemerge.
+func (c *Client) Optimize(indices ...string) *OptimizeService {
+	return NewOptimizeService(c).Index(indices...)
+}
+
+// Forcemerge optimizes one or more indices.
+// It replaces the deprecated Optimize API.
+func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService {
+	return NewIndicesForcemergeService(c).Index(indices...)
+}
+
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+	return NewRefreshService(c).Index(indices...)
+}
+
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush(indices ...string) *IndicesFlushService {
+	return NewIndicesFlushService(c).Index(indices...)
+}
+
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+	return NewAliasService(c)
+}
+
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+	return NewAliasesService(c)
+}
+
+// GetTemplate gets a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) GetTemplate() *GetTemplateService {
+	return NewGetTemplateService(c)
+}
+
+// PutTemplate creates or updates a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) PutTemplate() *PutTemplateService {
+	return NewPutTemplateService(c)
+}
+
+// DeleteTemplate deletes a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+	return NewDeleteTemplateService(c)
+}
+
+// IndexGetTemplate gets an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+	return NewIndicesGetTemplateService(c).Name(names...)
+}
+
+// IndexTemplateExists checks whether an index template exists.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+	return NewIndicesExistsTemplateService(c).Name(name)
+}
+
+// IndexPutTemplate creates or updates an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+	return NewIndicesPutTemplateService(c).Name(name)
+}
+
+// IndexDeleteTemplate deletes an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+	return NewIndicesDeleteTemplateService(c).Name(name)
+}
+
+// GetMapping gets a mapping.
+func (c *Client) GetMapping() *IndicesGetMappingService {
+	return NewIndicesGetMappingService(c)
+}
+
+// PutMapping registers a mapping.
+func (c *Client) PutMapping() *IndicesPutMappingService {
+	return NewIndicesPutMappingService(c)
+}
+
+// GetWarmer gets one or more warmers by name.
+func (c *Client) GetWarmer() *IndicesGetWarmerService {
+	return NewIndicesGetWarmerService(c)
+}
+
+// PutWarmer registers a warmer.
+func (c *Client) PutWarmer() *IndicesPutWarmerService {
+	return NewIndicesPutWarmerService(c)
+}
+
+// DeleteWarmer deletes one or more warmers.
+func (c *Client) DeleteWarmer() *IndicesDeleteWarmerService {
+	return NewIndicesDeleteWarmerService(c)
+}
+
+// -- cat APIs --
+
+// TODO cat aliases
+// TODO cat allocation
+// TODO cat count
+// TODO cat fielddata
+// TODO cat health
+// TODO cat indices
+// TODO cat master
+// TODO cat nodes
+// TODO cat pending tasks
+// TODO cat plugins
+// TODO cat recovery
+// TODO cat thread pool
+// TODO cat shards
+// TODO cat segments
+
+// -- Cluster APIs --
+
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+	return NewClusterHealthService(c)
+}
+
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+	return NewClusterStateService(c)
+}
+
+// ClusterStats retrieves cluster statistics.
+func (c *Client) ClusterStats() *ClusterStatsService {
+	return NewClusterStatsService(c)
+}
+
+// NodesInfo retrieves information about one, several, or all nodes in
+// the cluster.
+func (c *Client) NodesInfo() *NodesInfoService {
+	return NewNodesInfoService(c)
+}
+
+// TODO Pending cluster tasks
+// TODO Cluster Reroute
+// TODO Cluster Update Settings
+// TODO Nodes Stats
+// TODO Nodes hot_threads
+
+// -- Snapshot and Restore --
+
+// TODO Snapshot Create
+// TODO Snapshot Create Repository
+// TODO Snapshot Delete
+// TODO Snapshot Delete Repository
+// TODO Snapshot Get
+// TODO Snapshot Get Repository
+// TODO Snapshot Restore
+// TODO Snapshot Status
+// TODO Snapshot Verify Repository
+
+// -- Helpers and shortcuts --
+
+// ElasticsearchVersion returns the version number of Elasticsearch
+// running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+	res, _, err := c.Ping(url).Do()
+	if err != nil {
+		return "", err
+	}
+	return res.Version.Number, nil
+}
+
+// IndexNames returns the names of all indices in the cluster.
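+//
+// Sketch:
+//
+//	names, err := client.IndexNames()
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, name := range names {
+//		fmt.Println(name)
+//	}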
+func (c *Client) IndexNames() ([]string, error) {
+	res, err := c.IndexGetSettings().Index("_all").Do()
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for name := range res {
+		names = append(names, name)
+	}
+	return names, nil
+}
+
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+//
+// Notice that you need to specify a URL here explicitly.
+func (c *Client) Ping(url string) *PingService {
+	return NewPingService(c).URL(url)
+}
+
+// Reindex returns a service that will reindex documents from a source
+// index into a target index. See
+// http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
+	return NewReindexer(c, sourceIndex, CopyToTargetIndex(targetIndex))
+}
+
+// WaitForStatus waits for the cluster to have the given status.
+// This is a shortcut method for the ClusterHealth service.
+//
+// WaitForStatus waits for the specified timeout, e.g. "10s".
+// If the cluster reaches the given status within the timeout, nil is returned.
+// If the request timed out, ErrTimeout is returned.
+func (c *Client) WaitForStatus(status string, timeout string) error {
+	health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do()
+	if err != nil {
+		return err
+	}
+	if health.TimedOut {
+		return ErrTimeout
+	}
+	return nil
+}
+
+// WaitForGreenStatus waits for the cluster to have the "green" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForGreenStatus(timeout string) error {
+	return c.WaitForStatus("green", timeout)
+}
+
+// WaitForYellowStatus waits for the cluster to have the "yellow" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForYellowStatus(timeout string) error {
+	return c.WaitForStatus("yellow", timeout)
+}
+
+// TermVectors returns information and statistics on terms in the fields
+// of a particular document.
+func (c *Client) TermVectors(index, typ string) *TermvectorsService {
+	builder := NewTermvectorsService(c)
+	builder = builder.Index(index).Type(typ)
+	return builder
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/client_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/client_test.go
new file mode 100644
index 000000000..7bdcd2287
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/client_test.go
@@ -0,0 +1,899 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+ +package elastic + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "regexp" + "strings" + "testing" + "time" +) + +func findConn(s string, slice ...*conn) (int, bool) { + for i, t := range slice { + if s == t.URL() { + return i, true + } + } + return -1, false +} + +// -- NewClient -- + +func TestClientDefaults(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + if client.healthcheckEnabled != true { + t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled) + } + if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup { + t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup) + } + if client.healthcheckTimeout != DefaultHealthcheckTimeout { + t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout) + } + if client.healthcheckInterval != DefaultHealthcheckInterval { + t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval) + } + if client.snifferEnabled != true { + t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled) + } + if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup { + t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup) + } + if client.snifferTimeout != DefaultSnifferTimeout { + t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout) + } + if client.snifferInterval != DefaultSnifferInterval { + t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval) + } + if client.basicAuth != false { + t.Errorf("expected no basic auth; got: %v", client.basicAuth) + } + if client.basicAuthUsername != "" { + t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername) + } + if client.basicAuthPassword != "" { + t.Errorf("expected no basic auth password; got: %q", client.basicAuthUsername) + } + if client.sendGetBodyAs != "GET" { + t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs) + } +} + +func TestClientWithoutURL(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + // Two things should happen here: + // 1. The client starts sniffing the cluster on DefaultURL + // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL + if len(client.conns) == 0 { + t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns) + } + if !isTravis() { + if _, found := findConn(DefaultURL, client.conns...); !found { + t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) + } + } +} + +func TestClientWithSingleURL(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:9200")) + if err != nil { + t.Fatal(err) + } + // Two things should happen here: + // 1. The client starts sniffing the cluster on DefaultURL + // 2. The sniffing process should find (at least) one node in the cluster, i.e. 
the DefaultURL + if len(client.conns) == 0 { + t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns) + } + if !isTravis() { + if _, found := findConn(DefaultURL, client.conns...); !found { + t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) + } + } +} + +func TestClientWithMultipleURLs(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes. + if len(client.conns) != 1 { + t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns) + } + if !isTravis() { + if client.conns[0].URL() != DefaultURL { + t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) + } + } +} + +func TestClientWithBasicAuth(t *testing.T) { + client, err := NewClient(SetBasicAuth("user", "secret")) + if err != nil { + t.Fatal(err) + } + if client.basicAuth != true { + t.Errorf("expected basic auth; got: %v", client.basicAuth) + } + if got, want := client.basicAuthUsername, "user"; got != want { + t.Errorf("expected basic auth username %q; got: %q", want, got) + } + if got, want := client.basicAuthPassword, "secret"; got != want { + t.Errorf("expected basic auth password %q; got: %q", want, got) + } +} + +func TestClientSniffSuccess(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:9200")) + if err != nil { + t.Fatal(err) + } + // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes. + if len(client.conns) != 1 { + t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns) + } +} + +func TestClientSniffFailure(t *testing.T) { + _, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:19201")) + if err == nil { + t.Fatalf("expected cluster to fail with no nodes found") + } +} + +func TestClientSniffDisabled(t *testing.T) { + client, err := NewClient(SetSniff(false), SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + // The client should not sniff, so it should have two connections. + if len(client.conns) != 2 { + t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns) + } + // Make two requests, so that both connections are being used + for i := 0; i < len(client.conns); i++ { + client.Flush().Do() + } + // The first connection (127.0.0.1:9200) should now be okay. + if i, found := findConn("http://127.0.0.1:9200", client.conns...); !found { + t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9200") + } else { + if conn := client.conns[i]; conn.IsDead() { + t.Fatal("expected connection to be alive, but it is dead") + } + } + // The second connection (127.0.0.1:9201) should now be marked as dead. + if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found { + t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201") + } else { + if conn := client.conns[i]; !conn.IsDead() { + t.Fatal("expected connection to be dead, but it is alive") + } + } +} + +func TestClientWillMarkConnectionsAsAliveWhenAllAreDead(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:9201"), + SetSniff(false), SetHealthcheck(false), SetMaxRetries(0)) + if err != nil { + t.Fatal(err) + } + // We should have a connection. 
+	if len(client.conns) != 1 {
+		t.Fatalf("expected 1 node, got: %d (%v)", len(client.conns), client.conns)
+	}
+
+	// Make a request, so that the connection is marked as dead.
+	client.Flush().Do()
+
+	// The connection should now be marked as dead.
+	if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+		t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+	} else {
+		if conn := client.conns[i]; !conn.IsDead() {
+			t.Fatalf("expected connection to be dead, got: %v", conn)
+		}
+	}
+
+	// Now send another request and the connection should be marked as alive again.
+	client.Flush().Do()
+
+	if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+		t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+	} else {
+		if conn := client.conns[i]; conn.IsDead() {
+			t.Fatalf("expected connection to be alive, got: %v", conn)
+		}
+	}
+}
+
+func TestClientWithRequiredPlugins(t *testing.T) {
+	_, err := NewClient(SetRequiredPlugins("no-such-plugin"))
+	if err == nil {
+		t.Fatal("expected error when creating client")
+	}
+	if got, want := err.Error(), "elastic: plugin no-such-plugin not found"; got != want {
+		t.Fatalf("expected error %q; got: %q", want, got)
+	}
+}
+
+func TestClientHealthcheckStartupTimeout(t *testing.T) {
+	start := time.Now()
+	_, err := NewClient(SetURL("http://localhost:9299"), SetHealthcheckTimeoutStartup(5*time.Second))
+	duration := time.Now().Sub(start)
+	if err != ErrNoClient {
+		t.Fatal(err)
+	}
+	if duration < 5*time.Second {
+		t.Fatalf("expected a timeout in more than 5 seconds; got: %v", duration)
+	}
+}
+
+// -- NewSimpleClient --
+
+func TestSimpleClientDefaults(t *testing.T) {
+	client, err := NewSimpleClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if client.healthcheckEnabled != false {
+		t.Errorf("expected health checks to be disabled, got: %v", client.healthcheckEnabled)
+	}
+	if client.healthcheckTimeoutStartup != off {
+		t.Errorf("expected health checks timeout on startup = %v, got: %v", off, client.healthcheckTimeoutStartup)
+	}
+	if client.healthcheckTimeout != off {
+		t.Errorf("expected health checks timeout = %v, got: %v", off, client.healthcheckTimeout)
+	}
+	if client.healthcheckInterval != off {
+		t.Errorf("expected health checks interval = %v, got: %v", off, client.healthcheckInterval)
+	}
+	if client.snifferEnabled != false {
+		t.Errorf("expected sniffing to be disabled, got: %v", client.snifferEnabled)
+	}
+	if client.snifferTimeoutStartup != off {
+		t.Errorf("expected sniffer timeout on startup = %v, got: %v", off, client.snifferTimeoutStartup)
+	}
+	if client.snifferTimeout != off {
+		t.Errorf("expected sniffer timeout = %v, got: %v", off, client.snifferTimeout)
+	}
+	if client.snifferInterval != off {
+		t.Errorf("expected sniffer interval = %v, got: %v", off, client.snifferInterval)
+	}
+	if client.basicAuth != false {
+		t.Errorf("expected no basic auth; got: %v", client.basicAuth)
+	}
+	if client.basicAuthUsername != "" {
+		t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
+	}
+	if client.basicAuthPassword != "" {
+		t.Errorf("expected no basic auth password; got: %q", client.basicAuthPassword)
+	}
+	if client.sendGetBodyAs != "GET" {
+		t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
+	}
+}
+
+// -- Start and stop --
+
+func TestClientStartAndStop(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	running := client.IsRunning()
+	if !running {
+		t.Fatalf("expected background 
processes to run; got: %v", running) + } + + // Stop + client.Stop() + running = client.IsRunning() + if running { + t.Fatalf("expected background processes to be stopped; got: %v", running) + } + + // Stop again => no-op + client.Stop() + running = client.IsRunning() + if running { + t.Fatalf("expected background processes to be stopped; got: %v", running) + } + + // Start + client.Start() + running = client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } + + // Start again => no-op + client.Start() + running = client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } +} + +func TestClientStartAndStopWithSnifferAndHealthchecksDisabled(t *testing.T) { + client, err := NewClient(SetSniff(false), SetHealthcheck(false)) + if err != nil { + t.Fatal(err) + } + + running := client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } + + // Stop + client.Stop() + running = client.IsRunning() + if running { + t.Fatalf("expected background processes to be stopped; got: %v", running) + } + + // Stop again => no-op + client.Stop() + running = client.IsRunning() + if running { + t.Fatalf("expected background processes to be stopped; got: %v", running) + } + + // Start + client.Start() + running = client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } + + // Start again => no-op + client.Start() + running = client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } +} + +// -- Sniffing -- + +func TestClientSniffNode(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + + ch := make(chan []*conn) + go func() { ch <- client.sniffNode(DefaultURL) }() + + select { + case nodes := <-ch: + if len(nodes) != 1 { + t.Fatalf("expected %d nodes; got: %d", 1, len(nodes)) + } + pattern := `http:\/\/[\d\.]+:9200` + matched, err := regexp.MatchString(pattern, nodes[0].URL()) + if err != nil { + t.Fatal(err) + } + if !matched { + t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL()) + } + case <-time.After(2 * time.Second): + t.Fatal("expected no timeout in sniff node") + break + } +} + +func TestClientSniffOnDefaultURL(t *testing.T) { + client, _ := NewClient() + if client == nil { + t.Fatal("no client returned") + } + + ch := make(chan error, 1) + go func() { + ch <- client.sniff(DefaultSnifferTimeoutStartup) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("expected sniff to succeed; got: %v", err) + } + if len(client.conns) != 1 { + t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns)) + } + pattern := `http:\/\/[\d\.]+:9200` + matched, err := regexp.MatchString(pattern, client.conns[0].URL()) + if err != nil { + t.Fatal(err) + } + if !matched { + t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL()) + } + case <-time.After(2 * time.Second): + t.Fatal("expected no timeout in sniff") + break + } +} + +// -- Selector -- + +func TestClientSelectConnHealthy(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // Both are healthy, so we should get both URLs in round-robin + client.conns[0].MarkAsHealthy() + client.conns[1].MarkAsHealthy() + + // #1: Return 1st + c, err := client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() 
!= client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #2: Return 2nd + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #3: Return 1st + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } +} + +func TestClientSelectConnHealthyAndDead(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // 1st is healthy, second is dead + client.conns[0].MarkAsHealthy() + client.conns[1].MarkAsDead() + + // #1: Return 1st + c, err := client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #2: Return 1st again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #3: Return 1st again and again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } +} + +func TestClientSelectConnDeadAndHealthy(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // 1st is dead, 2nd is healthy + client.conns[0].MarkAsDead() + client.conns[1].MarkAsHealthy() + + // #1: Return 2nd + c, err := client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #2: Return 2nd again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #3: Return 2nd again and again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } +} + +func TestClientSelectConnAllDead(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // Both are dead + client.conns[0].MarkAsDead() + client.conns[1].MarkAsDead() + + // If all connections are dead, next should make them alive again, but + // still return ErrNoClient when it first finds out. 
+ c, err := client.next() + if err != ErrNoClient { + t.Fatal(err) + } + if c != nil { + t.Fatalf("expected no connection; got: %v", c) + } + // Return a connection + c, err = client.next() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if c == nil { + t.Fatalf("expected connection; got: %v", c) + } + // Return a connection + c, err = client.next() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if c == nil { + t.Fatalf("expected connection; got: %v", c) + } +} + +// -- ElasticsearchVersion -- + +func TestElasticsearchVersion(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + version, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if version == "" { + t.Errorf("expected a version number, got: %q", version) + } +} + +// -- IndexNames -- + +func TestIndexNames(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + names, err := client.IndexNames() + if err != nil { + t.Fatal(err) + } + if len(names) == 0 { + t.Fatalf("expected some index names, got: %d", len(names)) + } + var found bool + for _, name := range names { + if name == testIndexName { + found = true + break + } + } + if !found { + t.Fatalf("expected to find index %q; got: %v", testIndexName, found) + } +} + +// -- PerformRequest -- + +func TestPerformRequest(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } +} + +func TestPerformRequestWithSimpleClient(t *testing.T) { + client, err := NewSimpleClient() + if err != nil { + t.Fatal(err) + } + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } +} + +func TestPerformRequestWithLogger(t *testing.T) { + var w bytes.Buffer + out := log.New(&w, "LOGGER ", log.LstdFlags) + + client, err := NewClient(SetInfoLog(out)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + got := w.String() + pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n` + matched, err := regexp.MatchString(pattern, got) + if err != nil { + t.Fatalf("expected log line to match %q; got: %v", pattern, err) + } + if !matched { + t.Errorf("expected log line to match %q; got: %v", pattern, got) + } +} + +func TestPerformRequestWithLoggerAndTracer(t *testing.T) { + var lw bytes.Buffer + lout := log.New(&lw, "LOGGER ", log.LstdFlags) + + var tw bytes.Buffer + tout := log.New(&tw, "TRACER ", log.LstdFlags) 
+ + client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + lgot := lw.String() + if lgot == "" { + t.Errorf("expected logger output; got: %q", lgot) + } + + tgot := tw.String() + if tgot == "" { + t.Errorf("expected tracer output; got: %q", tgot) + } +} + +type customLogger struct { + out bytes.Buffer +} + +func (l *customLogger) Printf(format string, v ...interface{}) { + l.out.WriteString(fmt.Sprintf(format, v...) + "\n") +} + +func TestPerformRequestWithCustomLogger(t *testing.T) { + logger := &customLogger{} + + client, err := NewClient(SetInfoLog(logger)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + got := logger.out.String() + pattern := `^GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n` + matched, err := regexp.MatchString(pattern, got) + if err != nil { + t.Fatalf("expected log line to match %q; got: %v", pattern, err) + } + if !matched { + t.Errorf("expected log line to match %q; got: %v", pattern, got) + } +} + +// failingTransport will run a fail callback if it sees a given URL path prefix. +type failingTransport struct { + path string // path prefix to look for + fail func(*http.Request) (*http.Response, error) // call when path prefix is found + next http.RoundTripper // next round-tripper (use http.DefaultTransport if nil) +} + +// RoundTrip implements a failing transport. +func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) { + if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil { + return tr.fail(r) + } + if tr.next != nil { + return tr.next.RoundTrip(r) + } + return http.DefaultTransport.RoundTrip(r) +} + +func TestPerformRequestRetryOnHttpError(t *testing.T) { + var numFailedReqs int + fail := func(r *http.Request) (*http.Response, error) { + numFailedReqs += 1 + //return &http.Response{Request: r, StatusCode: 400}, nil + return nil, errors.New("request failed") + } + + // Run against a failing endpoint and see if PerformRequest + // retries correctly. 
+	tr := &failingTransport{path: "/fail", fail: fail}
+	httpClient := &http.Client{Transport: tr}
+
+	client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := client.PerformRequest("GET", "/fail", nil, nil)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if res != nil {
+		t.Fatal("expected no response")
+	}
+	// With SetMaxRetries(5), the failing transport should have been hit 5 times.
+	if numFailedReqs != 5 {
+		t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
+	}
+}
+
+func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) {
+	var numFailedReqs int
+	fail := func(r *http.Request) (*http.Response, error) {
+		numFailedReqs += 1
+		return &http.Response{Request: r, StatusCode: 500}, nil
+	}
+
+	// Run against a failing endpoint and see if PerformRequest
+	// retries correctly.
+	tr := &failingTransport{path: "/fail", fail: fail}
+	httpClient := &http.Client{Transport: tr}
+
+	client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := client.PerformRequest("GET", "/fail", nil, nil)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if res != nil {
+		t.Fatal("expected no response")
+	}
+	// Retry should not have triggered additional requests because
+	// the transport returned a valid (albeit unsuccessful) HTTP response.
+	if numFailedReqs != 1 {
+		t.Errorf("expected %d failed requests; got: %d", 1, numFailedReqs)
+	}
+}
+
+// failingBody will return an error when json.Marshal is called on it.
+type failingBody struct{}
+
+// MarshalJSON implements the json.Marshaler interface and always returns an error.
+func (fb failingBody) MarshalJSON() ([]byte, error) {
+	return nil, errors.New("failing to marshal")
+}
+
+func TestPerformRequestWithSetBodyError(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	res, err := client.PerformRequest("GET", "/", nil, failingBody{})
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if res != nil {
+		t.Fatal("expected no response")
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/Makefile b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/Makefile
new file mode 100644
index 000000000..cc6261db5
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/Makefile
@@ -0,0 +1,16 @@
+.PHONY: build run-omega-cluster-test
+
+default: build
+
+build:
+	go build cluster-test.go
+
+run-omega-cluster-test:
+	go run -race cluster-test.go \
+		-nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
+		-n=5 \
+		-retries=5 \
+		-sniff=true -sniffer=10s \
+		-healthcheck=true -healthchecker=5s \
+		-errorlog=errors.log
+
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/README.md b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/README.md
new file mode 100644
index 000000000..f10748cc2
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/README.md
@@ -0,0 +1,63 @@
+# Cluster Test
+
+This directory contains a program you can use to test a cluster.
+
+Here's how:
+
+First, install a cluster of Elasticsearch nodes. You can install them on
+different computers, or start several nodes on a single machine.
+
+Build cluster-test by `go build cluster-test.go` (or build with `make`).
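+
+For a quick smoke test against a single local node, something like the
+following should work (the URL and flag values are just an example):
+
+```sh
+$ ./cluster-test -nodes=http://127.0.0.1:9200 -n=1 -index=twitter -retries=1
+```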
+ +Run `./cluster-test -h` to get a list of flags: + +```sh +$ ./cluster-test -h +Usage of ./cluster-test: + -errorlog="": error log file + -healthcheck=true: enable or disable healthchecks + -healthchecker=1m0s: healthcheck interval + -index="twitter": name of ES index to use + -infolog="": info log file + -n=5: number of goroutines that run searches + -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200') + -retries=0: number of retries + -sniff=true: enable or disable sniffer + -sniffer=15m0s: sniffer interval + -tracelog="": trace log file +``` + +Example: + +```sh +$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log +``` + +The above example will create an index and start some search jobs on the +cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201, +and http://127.0.0.1:9202. + +* It will create an index called `twitter` on the cluster (`-index=twitter`) +* It will run 5 search jobs in parallel (`-n=5`). +* It will retry failed requests 5 times (`-retries=5`). +* It will sniff the cluster periodically (`-sniff=true`). +* It will sniff the cluster every 10 seconds (`-sniffer=10s`). +* It will perform health checks periodically (`-healthcheck=true`). +* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`). +* It will write an error log file (`-errorlog=error.log`). + +If you want to test Elastic with nodes going up and down, you can use a +chaos monkey script like this and run it on the nodes of your cluster: + +```sh +#!/bin/bash +while true +do + echo "Starting ES node" + elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid + sleep `jot -r 1 10 300` # wait for 10-300s + echo "Stopping ES node" + kill -TERM `cat es.pid` + sleep `jot -r 1 10 60` # wait for 10-60s +done +``` diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/cluster-test.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/cluster-test.go new file mode 100644 index 000000000..8880992ef --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/cluster-test.go @@ -0,0 +1,356 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package main + +import ( + "encoding/json" + "errors" + "flag" + "fmt" + "log" + "math/rand" + "os" + "runtime" + "strings" + "sync/atomic" + "time" + + "gopkg.in/olivere/elastic.v3" +) + +type Tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *elastic.SuggestField `json:"suggest_field,omitempty"` +} + +var ( + nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 
'http://192.168.2.10:9200,http://192.168.2.11:9200')") + n = flag.Int("n", 5, "number of goroutines that run searches") + index = flag.String("index", "twitter", "name of ES index to use") + errorlogfile = flag.String("errorlog", "", "error log file") + infologfile = flag.String("infolog", "", "info log file") + tracelogfile = flag.String("tracelog", "", "trace log file") + retries = flag.Int("retries", elastic.DefaultMaxRetries, "number of retries") + sniff = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer") + sniffer = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval") + healthcheck = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks") + healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval") +) + +func main() { + flag.Parse() + + runtime.GOMAXPROCS(runtime.NumCPU()) + + if *nodes == "" { + log.Fatal("no nodes specified") + } + urls := strings.SplitN(*nodes, ",", -1) + + testcase, err := NewTestCase(*index, urls) + if err != nil { + log.Fatal(err) + } + + testcase.SetErrorLogFile(*errorlogfile) + testcase.SetInfoLogFile(*infologfile) + testcase.SetTraceLogFile(*tracelogfile) + testcase.SetMaxRetries(*retries) + testcase.SetHealthcheck(*healthcheck) + testcase.SetHealthcheckInterval(*healthchecker) + testcase.SetSniff(*sniff) + testcase.SetSnifferInterval(*sniffer) + + if err := testcase.Run(*n); err != nil { + log.Fatal(err) + } + + select {} +} + +type RunInfo struct { + Success bool +} + +type TestCase struct { + nodes []string + client *elastic.Client + runs int64 + failures int64 + runCh chan RunInfo + index string + errorlogfile string + infologfile string + tracelogfile string + maxRetries int + healthcheck bool + healthcheckInterval time.Duration + sniff bool + snifferInterval time.Duration +} + +func NewTestCase(index string, nodes []string) (*TestCase, error) { + if index == "" { + return nil, errors.New("no index name specified") + } + + return &TestCase{ + index: index, + nodes: nodes, + runCh: make(chan RunInfo), + }, nil +} + +func (t *TestCase) SetIndex(name string) { + t.index = name +} + +func (t *TestCase) SetErrorLogFile(name string) { + t.errorlogfile = name +} + +func (t *TestCase) SetInfoLogFile(name string) { + t.infologfile = name +} + +func (t *TestCase) SetTraceLogFile(name string) { + t.tracelogfile = name +} + +func (t *TestCase) SetMaxRetries(n int) { + t.maxRetries = n +} + +func (t *TestCase) SetSniff(enabled bool) { + t.sniff = enabled +} + +func (t *TestCase) SetSnifferInterval(d time.Duration) { + t.snifferInterval = d +} + +func (t *TestCase) SetHealthcheck(enabled bool) { + t.healthcheck = enabled +} + +func (t *TestCase) SetHealthcheckInterval(d time.Duration) { + t.healthcheckInterval = d +} + +func (t *TestCase) Run(n int) error { + if err := t.setup(); err != nil { + return err + } + + for i := 1; i < n; i++ { + go t.search() + } + + go t.monitor() + + return nil +} + +func (t *TestCase) monitor() { + print := func() { + fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ") + } + + for { + select { + case run := <-t.runCh: + atomic.AddInt64(&t.runs, 1) + if !run.Success { + atomic.AddInt64(&t.failures, 1) + fmt.Println() + } + print() + case <-time.After(5 * time.Second): + // Print stats after some inactivity + print() + break + } + } +} + +func (t *TestCase) setup() error { + var errorlogger *log.Logger + if t.errorlogfile != "" { + f, err := 
os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile) + } + + var infologger *log.Logger + if t.infologfile != "" { + f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + infologger = log.New(f, "", log.LstdFlags) + } + + // Trace request and response details like this + var tracelogger *log.Logger + if t.tracelogfile != "" { + f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + tracelogger = log.New(f, "", log.LstdFlags) + } + + client, err := elastic.NewClient( + elastic.SetURL(t.nodes...), + elastic.SetErrorLog(errorlogger), + elastic.SetInfoLog(infologger), + elastic.SetTraceLog(tracelogger), + elastic.SetMaxRetries(t.maxRetries), + elastic.SetSniff(t.sniff), + elastic.SetSnifferInterval(t.snifferInterval), + elastic.SetHealthcheck(t.healthcheck), + elastic.SetHealthcheckInterval(t.healthcheckInterval)) + if err != nil { + // Handle error + return err + } + t.client = client + + // Use the IndexExists service to check if a specified index exists. + exists, err := t.client.IndexExists(t.index).Do() + if err != nil { + return err + } + if exists { + deleteIndex, err := t.client.DeleteIndex(t.index).Do() + if err != nil { + return err + } + if !deleteIndex.Acknowledged { + return errors.New("delete index not acknowledged") + } + } + + // Create a new index. + createIndex, err := t.client.CreateIndex(t.index).Do() + if err != nil { + return err + } + if !createIndex.Acknowledged { + return errors.New("create index not acknowledged") + } + + // Index a tweet (using JSON serialization) + tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0} + _, err = t.client.Index(). + Index(t.index). + Type("tweet"). + Id("1"). + BodyJson(tweet1). + Do() + if err != nil { + return err + } + + // Index a second tweet (by string) + tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}` + _, err = t.client.Index(). + Index(t.index). + Type("tweet"). + Id("2"). + BodyString(tweet2). + Do() + if err != nil { + return err + } + + // Flush to make sure the documents got written. + _, err = t.client.Flush().Index(t.index).Do() + if err != nil { + return err + } + + return nil +} + +func (t *TestCase) search() { + // Loop forever to check for connection issues + for { + // Get tweet with specified ID + get1, err := t.client.Get(). + Index(t.index). + Type("tweet"). + Id("1"). + Do() + if err != nil { + //failf("Get failed: %v", err) + t.runCh <- RunInfo{Success: false} + continue + } + if !get1.Found { + //log.Printf("Document %s not found\n", "1") + //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type) + t.runCh <- RunInfo{Success: false} + continue + } + + // Search with a term query + searchResult, err := t.client.Search(). + Index(t.index). // search in index t.index + Query(elastic.NewTermQuery("user", "olivere")). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute + if err != nil { + //failf("Search failed: %v\n", err) + t.runCh <- RunInfo{Success: false} + continue + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. 
+ //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Number of hits + if searchResult.Hits != nil { + //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var tweet Tweet + err := json.Unmarshal(*hit.Source, &tweet) + if err != nil { + // Deserialization failed + //failf("Deserialize failed: %v\n", err) + t.runCh <- RunInfo{Success: false} + continue + } + + // Work with tweet + //fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + //fmt.Print("Found no tweets\n") + } + + t.runCh <- RunInfo{Success: true} + + // Sleep some time + time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health.go new file mode 100644 index 000000000..0c51c6041 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health.go @@ -0,0 +1,244 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ClusterHealthService allows to get a very simple status on the health of the cluster. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html +// for details. +type ClusterHealthService struct { + client *Client + pretty bool + indices []string + level string + local *bool + masterTimeout string + timeout string + waitForActiveShards *int + waitForNodes string + waitForRelocatingShards *int + waitForStatus string +} + +// NewClusterHealthService creates a new ClusterHealthService. +func NewClusterHealthService(client *Client) *ClusterHealthService { + return &ClusterHealthService{ + client: client, + indices: make([]string, 0), + } +} + +// Index limits the information returned to specific indices. +func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService { + s.indices = append(s.indices, indices...) + return s +} + +// Level specifies the level of detail for returned information. +func (s *ClusterHealthService) Level(level string) *ClusterHealthService { + s.level = level + return s +} + +// Local indicates whether to return local information. If it is true, +// we do not retrieve the state from master node (default: false). +func (s *ClusterHealthService) Local(local bool) *ClusterHealthService { + s.local = &local + return s +} + +// MasterTimeout specifies an explicit operation timeout for connection to master node. +func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout specifies an explicit operation timeout. +func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService { + s.timeout = timeout + return s +} + +// WaitForActiveShards can be used to wait until the specified number of shards are active. 
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+ s.waitForActiveShards = &waitForActiveShards
+ return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+// Example: "12" to wait for exact values, ">12" and "<12" for ranges.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+ s.waitForNodes = waitForNodes
+ return s
+}
+
+// WaitForRelocatingShards can be used to wait until no more than the specified
+// number of shards are relocating (typically 0).
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+ s.waitForRelocatingShards = &waitForRelocatingShards
+ return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+ s.waitForStatus = waitForStatus
+ return s
+}
+
+// WaitForGreenStatus will wait for the "green" state.
+func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
+ return s.WaitForStatus("green")
+}
+
+// WaitForYellowStatus will wait for the "yellow" state.
+func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
+ return s.WaitForStatus("yellow")
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.indices) > 0 {
+ path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+ "index": strings.Join(s.indices, ","),
+ })
+ } else {
+ path = "/_cluster/health"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.level != "" {
+ params.Set("level", s.level)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.waitForActiveShards != nil {
+ params.Set("wait_for_active_shards", fmt.Sprintf("%v", *s.waitForActiveShards))
+ }
+ if s.waitForNodes != "" {
+ params.Set("wait_for_nodes", s.waitForNodes)
+ }
+ if s.waitForRelocatingShards != nil {
+ params.Set("wait_for_relocating_shards", fmt.Sprintf("%v", *s.waitForRelocatingShards))
+ }
+ if s.waitForStatus != "" {
+ params.Set("wait_for_status", s.waitForStatus)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterHealthService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
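+//
+// A minimal usage sketch (assuming an initialized *Client named client):
+//
+//	res, err := client.ClusterHealth().WaitForYellowStatus().Timeout("10s").Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("cluster status: %s\n", res.Status)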
+func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + resp := new(ClusterHealthResponse) + if err := json.Unmarshal(res.Body, resp); err != nil { + return nil, err + } + return resp, nil +} + +// ClusterHealthResponse is the response of ClusterHealthService.Do. +type ClusterHealthResponse struct { + ClusterName string `json:"cluster_name"` + Status string `json:"status"` + TimedOut bool `json:"timed_out"` + NumberOfNodes int `json:"number_of_nodes"` + NumberOfDataNodes int `json:"number_of_data_nodes"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"` + ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` + + // Validation failures -> index name -> array of validation failures + ValidationFailures []map[string][]string `json:"validation_failures"` + + // Index name -> index health + Indices map[string]*ClusterIndexHealth `json:"indices"` +} + +// ClusterIndexHealth will be returned as part of ClusterHealthResponse. +type ClusterIndexHealth struct { + Status string `json:"status"` + NumberOfShards int `json:"number_of_shards"` + NumberOfReplicas int `json:"number_of_replicas"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + // Validation failures + ValidationFailures []string `json:"validation_failures"` + // Shards by id, e.g. "0" or "1" + Shards map[string]*ClusterShardHealth `json:"shards"` +} + +// ClusterShardHealth will be returned as part of ClusterHealthResponse. +type ClusterShardHealth struct { + Status string `json:"status"` + PrimaryActive bool `json:"primary_active"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health_test.go new file mode 100644 index 000000000..fcb612f19 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health_test.go @@ -0,0 +1,109 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "net/url" + "testing" +) + +func TestClusterHealth(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster health + res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.Status != "green" && res.Status != "red" && res.Status != "yellow" { + t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status) + } +} + +func TestClusterHealthURLs(t *testing.T) { + tests := []struct { + Service *ClusterHealthService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterHealthService{ + indices: []string{}, + }, + ExpectedPath: "/_cluster/health", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter"}, + }, + ExpectedPath: "/_cluster/health/twitter", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter", "gplus"}, + }, + ExpectedPath: "/_cluster/health/twitter%2Cgplus", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter"}, + waitForStatus: "yellow", + }, + ExpectedPath: "/_cluster/health/twitter", + ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} + +func TestClusterHealthWaitForStatus(t *testing.T) { + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + // Cluster health on an index that does not exist should never get to yellow + health, err := client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do() + if err == nil { + t.Fatalf("expected timeout error; got: %v", err) + } + if !IsTimeout(err) { + t.Fatalf("expected timeout error; got: %v", err) + } + if health != nil { + t.Fatalf("expected no response; got: %v", health) + } + + // Cluster wide health + health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if health.TimedOut != false { + t.Fatalf("expected no timeout; got: %v "+ + "(does your local cluster contain unassigned shards?)", health.TimedOut) + } + if health.Status != "green" { + t.Fatalf("expected health = %q; got: %q", "green", health.Status) + } + + // Cluster wide health via shortcut on client + err = client.WaitForGreenStatus("10s") + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state.go new file mode 100644 index 000000000..9c3678c75 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state.go @@ -0,0 +1,284 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// ClusterStateService retrieves comprehensive state information of the whole cluster.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html
+// for details.
+type ClusterStateService struct {
+ client *Client
+ pretty bool
+ indices []string
+ metrics []string
+ allowNoIndices *bool
+ expandWildcards string
+ flatSettings *bool
+ ignoreUnavailable *bool
+ local *bool
+ masterTimeout string
+}
+
+// NewClusterStateService creates a new ClusterStateService.
+func NewClusterStateService(client *Client) *ClusterStateService {
+ return &ClusterStateService{
+ client: client,
+ indices: make([]string, 0),
+ metrics: make([]string, 0),
+ }
+}
+
+// Index is a list of index names. Use _all or an empty string to
+// perform the operation on all indices.
+func (s *ClusterStateService) Index(indices ...string) *ClusterStateService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Metric limits the information returned to the specified metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService {
+ s.metrics = append(s.metrics, metrics...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes the `_all` string or when no indices have been specified).
+func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// FlatSettings, when set, returns settings in flat format (default: false).
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Local indicates whether to return local information. When set, it does not
+// retrieve the state from the master node (default: false).
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+ s.local = &local
+ return s
+}
+
+// MasterTimeout specifies the timeout for the connection to the master node.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
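+// For example, Index("twitter") combined with Metric("metadata") expands to
+// "/_cluster/state/metadata/twitter"; with no indices or metrics given, both
+// default to "_all".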
+func (s *ClusterStateService) buildURL() (string, url.Values, error) { + // Build URL + metrics := strings.Join(s.metrics, ",") + if metrics == "" { + metrics = "_all" + } + indices := strings.Join(s.indices, ",") + if indices == "" { + indices = "_all" + } + path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{ + "metrics": metrics, + "indices": indices, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStateService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterStateService) Do() (*ClusterStateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStateResponse is the response of ClusterStateService.Do. +type ClusterStateResponse struct { + ClusterName string `json:"cluster_name"` + Version int64 `json:"version"` + StateUUID string `json:"state_uuid"` + MasterNode string `json:"master_node"` + Blocks map[string]*clusterBlocks `json:"blocks"` + Nodes map[string]*discoveryNode `json:"nodes"` + Metadata *clusterStateMetadata `json:"metadata"` + RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"` + RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"` + Customs map[string]interface{} `json:"customs"` +} + +type clusterBlocks struct { + Global map[string]*clusterBlock `json:"global"` // id -> cluster block + Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block +} + +type clusterBlock struct { + Description string `json:"description"` + Retryable bool `json:"retryable"` + DisableStatePersistence bool `json:"disable_state_persistence"` + Levels []string `json:"levels"` +} + +type clusterStateMetadata struct { + ClusterUUID string `json:"cluster_uuid"` + Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata + Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data + RoutingTable struct { + Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table + } `json:"routing_table"` + RoutingNodes struct { + Unassigned []*shardRouting `json:"unassigned"` + Nodes []*shardRouting `json:"nodes"` + } `json:"routing_nodes"` + Customs map[string]interface{} `json:"customs"` +} + +type discoveryNode struct { + Name string `json:"name"` // server name, e.g. 
"es1" + TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300] + Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true } +} + +type clusterStateRoutingTable struct { + Indices map[string]interface{} `json:"indices"` +} + +type clusterStateRoutingNode struct { + Unassigned []*shardRouting `json:"unassigned"` + // Node Id -> shardRouting + Nodes map[string][]*shardRouting `json:"nodes"` +} + +type indexTemplateMetaData struct { + Template string `json:"template"` // e.g. "store-*" + Order int `json:"order"` + Settings map[string]interface{} `json:"settings"` // index settings + Mappings map[string]interface{} `json:"mappings"` // type name -> mapping +} + +type indexMetaData struct { + State string `json:"state"` + Settings map[string]interface{} `json:"settings"` + Mappings map[string]interface{} `json:"mappings"` + Aliases []string `json:"aliases"` // e.g. [ "alias1", "alias2" ] +} + +type indexRoutingTable struct { + Shards map[string]*shardRouting `json:"shards"` +} + +type shardRouting struct { + State string `json:"state"` + Primary bool `json:"primary"` + Node string `json:"node"` + RelocatingNode string `json:"relocating_node"` + Shard int `json:"shard"` + Index string `json:"index"` + Version int64 `json:"state"` + RestoreSource *RestoreSource `json:"restore_source"` + AllocationId *allocationId `json:"allocation_id"` + UnassignedInfo *unassignedInfo `json:"unassigned_info"` +} + +type RestoreSource struct { + Repository string `json:"repository"` + Snapshot string `json:"snapshot"` + Version string `json:"version"` + Index string `json:"index"` +} + +type allocationId struct { + Id string `json:"id"` + RelocationId string `json:"relocation_id"` +} + +type unassignedInfo struct { + Reason string `json:"reason"` + At string `json:"at"` + Details string `json:"details"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state_test.go new file mode 100644 index 000000000..e73a8eeb7 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state_test.go @@ -0,0 +1,92 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "net/url" + "testing" +) + +func TestClusterState(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster state + res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.ClusterName == "" { + t.Fatalf("expected a cluster name; got: %q", res.ClusterName) + } +} + +func TestClusterStateURLs(t *testing.T) { + tests := []struct { + Service *ClusterStateService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterStateService{ + indices: []string{}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/_all", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/twitter", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter", "gplus"}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus", + }, + { + Service: &ClusterStateService{ + indices: []string{}, + metrics: []string{"nodes"}, + }, + ExpectedPath: "/_cluster/state/nodes/_all", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{"nodes"}, + }, + ExpectedPath: "/_cluster/state/nodes/twitter", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{"nodes"}, + masterTimeout: "1s", + }, + ExpectedPath: "/_cluster/state/nodes/twitter", + ExpectedParams: url.Values{"master_timeout": []string{"1s"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats.go new file mode 100644 index 000000000..1f0430592 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats.go @@ -0,0 +1,349 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html. +type ClusterStatsService struct { + client *Client + pretty bool + nodeId []string + flatSettings *bool + human *bool +} + +// NewClusterStatsService creates a new ClusterStatsService. +func NewClusterStatsService(client *Client) *ClusterStatsService { + return &ClusterStatsService{ + client: client, + nodeId: make([]string, 0), + } +} + +// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. 
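+// For example, NodeId([]string{"_local"}) limits the stats to the node the
+// client is connected to.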
+func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService {
+ s.nodeId = nodeId
+ return s
+}
+
+// FlatSettings is documented as: Return settings in flat format (default: false).
+func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Human is documented as: Whether to return time and byte values in human-readable format.
+func (s *ClusterStatsService) Human(human bool) *ClusterStatsService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+
+ if len(s.nodeId) > 0 {
+ path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ } else {
+ path, err = uritemplates.Expand("/_cluster/stats", map[string]string{})
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterStatsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClusterStatsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClusterStatsResponse is the response of ClusterStatsService.Do.
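+//
+// A minimal retrieval sketch (assuming an initialized *Client named client):
+//
+//	stats, err := client.ClusterStats().Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("%s is %s\n", stats.ClusterName, stats.Status)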
+type ClusterStatsResponse struct { + Timestamp int64 `json:"timestamp"` + ClusterName string `json:"cluster_name"` + ClusterUUID string `json:"uuid"` + Status string `json:"status"` + Indices *ClusterStatsIndices `json:"indices"` + Nodes *ClusterStatsNodes `json:"nodes"` +} + +type ClusterStatsIndices struct { + Count int `json:"count"` + Shards *ClusterStatsIndicesShards `json:"shards"` + Docs *ClusterStatsIndicesDocs `json:"docs"` + Store *ClusterStatsIndicesStore `json:"store"` + FieldData *ClusterStatsIndicesFieldData `json:"fielddata"` + FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"` + IdCache *ClusterStatsIndicesIdCache `json:"id_cache"` + Completion *ClusterStatsIndicesCompletion `json:"completion"` + Segments *ClusterStatsIndicesSegments `json:"segments"` + Percolate *ClusterStatsIndicesPercolate `json:"percolate"` +} + +type ClusterStatsIndicesShards struct { + Total int `json:"total"` + Primaries int `json:"primaries"` + Replication float64 `json:"replication"` + Index *ClusterStatsIndicesShardsIndex `json:"index"` +} + +type ClusterStatsIndicesShardsIndex struct { + Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"` + Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"` + Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"` +} + +type ClusterStatsIndicesShardsIndexIntMinMax struct { + Min int `json:"min"` + Max int `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesShardsIndexFloat64MinMax struct { + Min float64 `json:"min"` + Max float64 `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesDocs struct { + Count int `json:"count"` + Deleted int `json:"deleted"` +} + +type ClusterStatsIndicesStore struct { + Size string `json:"size"` // e.g. "5.3gb" + SizeInBytes int64 `json:"size_in_bytes"` + ThrottleTime string `json:"throttle_time"` // e.g. "0s" + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type ClusterStatsIndicesFieldData struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + Fields map[string]struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesFilterCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` +} + +type ClusterStatsIndicesIdCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` +} + +type ClusterStatsIndicesCompletion struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + Fields map[string]struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesSegments struct { + Count int64 `json:"count"` + Memory string `json:"memory"` // e.g. "61.3kb" + MemoryInBytes int64 `json:"memory_in_bytes"` + IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb" + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb" + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` + VersionMapMemory string `json:"version_map_memory"` // e.g. 
"61.3kb" + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` + FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb" + FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` +} + +type ClusterStatsIndicesPercolate struct { + Total int64 `json:"total"` + // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems + Time string `json:"get_time"` // e.g. "1s" + TimeInBytes int64 `json:"time_in_millis"` + Current int64 `json:"current"` + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"` + Queries int64 `json:"queries"` +} + +// --- + +type ClusterStatsNodes struct { + Count *ClusterStatsNodesCounts `json:"counts"` + Versions []string `json:"versions"` + OS *ClusterStatsNodesOsStats `json:"os"` + Process *ClusterStatsNodesProcessStats `json:"process"` + JVM *ClusterStatsNodesJvmStats `json:"jvm"` + FS *ClusterStatsNodesFsStats `json:"fs"` + Plugins []*ClusterStatsNodesPlugin `json:"plugins"` +} + +type ClusterStatsNodesCounts struct { + Total int `json:"total"` + MasterOnly int `json:"master_only"` + DataOnly int `json:"data_only"` + MasterData int `json:"master_data"` + Client int `json:"client"` +} + +type ClusterStatsNodesOsStats struct { + AvailableProcessors int `json:"available_processors"` + Mem *ClusterStatsNodesOsStatsMem `json:"mem"` + CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"` +} + +type ClusterStatsNodesOsStatsMem struct { + Total string `json:"total"` // e.g. "16gb" + TotalInBytes int64 `json:"total_in_bytes"` +} + +type ClusterStatsNodesOsStatsCPU struct { + Vendor string `json:"vendor"` + Model string `json:"model"` + MHz int `json:"mhz"` + TotalCores int `json:"total_cores"` + TotalSockets int `json:"total_sockets"` + CoresPerSocket int `json:"cores_per_socket"` + CacheSize string `json:"cache_size"` // e.g. "256b" + CacheSizeInBytes int64 `json:"cache_size_in_bytes"` + Count int `json:"count"` +} + +type ClusterStatsNodesProcessStats struct { + CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"` + OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"` +} + +type ClusterStatsNodesProcessStatsCPU struct { + Percent float64 `json:"percent"` +} + +type ClusterStatsNodesProcessStatsOpenFileDescriptors struct { + Min int64 `json:"min"` + Max int64 `json:"max"` + Avg int64 `json:"avg"` +} + +type ClusterStatsNodesJvmStats struct { + MaxUptime string `json:"max_uptime"` // e.g. "5h" + MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` + Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"` + Mem *ClusterStatsNodesJvmStatsMem `json:"mem"` + Threads int64 `json:"threads"` +} + +type ClusterStatsNodesJvmStatsVersion struct { + Version string `json:"version"` // e.g. "1.8.0_45" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. "25.45-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + Count int `json:"count"` +} + +type ClusterStatsNodesJvmStatsMem struct { + HeapUsed string `json:"heap_used"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` + HeapMax string `json:"heap_max"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` +} + +type ClusterStatsNodesFsStats struct { + Path string `json:"path"` + Mount string `json:"mount"` + Dev string `json:"dev"` + Total string `json:"total"` // e.g. "930.7gb"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` // e.g. 
"930.7gb"` + FreeInBytes int64 `json:"free_in_bytes"` + Available string `json:"available"` // e.g. "930.7gb"` + AvailableInBytes int64 `json:"available_in_bytes"` + DiskReads int64 `json:"disk_reads"` + DiskWrites int64 `json:"disk_writes"` + DiskIOOp int64 `json:"disk_io_op"` + DiskReadSize string `json:"disk_read_size"` // e.g. "0b"` + DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"` + DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"` + DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"` + DiskIOSize string `json:"disk_io_size"` // e.g. "0b"` + DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"` + DiskQueue string `json:"disk_queue"` + DiskServiceTime string `json:"disk_service_time"` +} + +type ClusterStatsNodesPlugin struct { + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + URL string `json:"url"` + JVM bool `json:"jvm"` + Site bool `json:"site"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats_test.go new file mode 100644 index 000000000..74326a6e7 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats_test.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/url" + "testing" +) + +func TestClusterStats(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster stats + res, err := client.ClusterStats().Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.ClusterName == "" { + t.Fatalf("expected a cluster name; got: %q", res.ClusterName) + } +} + +func TestClusterStatsURLs(t *testing.T) { + fFlag := false + tFlag := true + + tests := []struct { + Service *ClusterStatsService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterStatsService{ + nodeId: []string{}, + }, + ExpectedPath: "/_cluster/stats", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1"}, + }, + ExpectedPath: "/_cluster/stats/nodes/node1", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1", "node2"}, + }, + ExpectedPath: "/_cluster/stats/nodes/node1%2Cnode2", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{}, + flatSettings: &tFlag, + }, + ExpectedPath: "/_cluster/stats", + ExpectedParams: url.Values{"flat_settings": []string{"true"}}, + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1"}, + flatSettings: &fFlag, + }, + ExpectedPath: "/_cluster/stats/nodes/node1", + ExpectedParams: url.Values{"flat_settings": []string{"false"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/config/elasticsearch.yml b/services/templeton/vendor/src/github.com/olivere/elastic/config/elasticsearch.yml new file mode 100644 index 000000000..b571a064c --- /dev/null +++ 
b/services/templeton/vendor/src/github.com/olivere/elastic/config/elasticsearch.yml @@ -0,0 +1,103 @@ +# ======================== Elasticsearch Configuration ========================= +# +# NOTE: Elasticsearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what are you trying to accomplish and the consequences. +# +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. +# +# Please see the documentation for further information on configuration options: +# +# +# ---------------------------------- Cluster ----------------------------------- +# +# Use a descriptive name for your cluster: +# +# cluster.name: my-application +# +# ------------------------------------ Node ------------------------------------ +# +# Use a descriptive name for the node: +# +# node.name: node-1 +# +# Add custom attributes to the node: +# +# node.rack: r1 +# +# ----------------------------------- Paths ------------------------------------ +# +# Path to directory where to store the data (separate multiple locations by comma): +# +# path.data: /path/to/data +# +# Path to log files: +# +# path.logs: /path/to/logs +# +# ----------------------------------- Memory ----------------------------------- +# +# Lock the memory on startup: +# +# bootstrap.mlockall: true +# +# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory +# available on the system and that the owner of the process is allowed to use this limit. +# +# Elasticsearch performs poorly when the system is swapping the memory. +# +# ---------------------------------- Network ----------------------------------- +# +# Set the bind adress to a specific IP (IPv4 or IPv6): +# +# network.host: 192.168.0.1 +# +# Set a custom port for HTTP: +# +# http.port: 9200 +# +# For more information, see the documentation at: +# +# +# ---------------------------------- Gateway ----------------------------------- +# +# Block initial recovery after a full cluster restart until N nodes are started: +# +# gateway.recover_after_nodes: 3 +# +# For more information, see the documentation at: +# +# +# --------------------------------- Discovery ---------------------------------- +# +# Elasticsearch nodes will find each other via multicast, by default. 
+# +# To use the unicast discovery, disable the multicast discovery: +# +# discovery.zen.ping.multicast.enabled: false +# +# Pass an initial list of hosts to perform discovery when new node is started: +# +# discovery.zen.ping.unicast.hosts: ["host1", "host2"] +# +# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): +# +# discovery.zen.minimum_master_nodes: 3 +# +# For more information, see the documentation at: +# +# +# ---------------------------------- Various ----------------------------------- +# +# Disable starting multiple nodes on a single system: +# +# node.max_local_storage_nodes: 1 +# +# Require explicit names when deleting indices: +# +# action.destructive_requires_name: true + +# Enable scripting as described here: https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html +script.inline: on +script.indexed: on diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/connection.go b/services/templeton/vendor/src/github.com/olivere/elastic/connection.go new file mode 100644 index 000000000..b8b5bf8aa --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/connection.go @@ -0,0 +1,90 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "sync" + "time" +) + +// conn represents a single connection to a node in a cluster. +type conn struct { + sync.RWMutex + nodeID string // node ID + url string + failures int + dead bool + deadSince *time.Time +} + +// newConn creates a new connection to the given URL. +func newConn(nodeID, url string) *conn { + c := &conn{ + nodeID: nodeID, + url: url, + } + return c +} + +// String returns a representation of the connection status. +func (c *conn) String() string { + c.RLock() + defer c.RUnlock() + return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince) +} + +// NodeID returns the ID of the node of this connection. +func (c *conn) NodeID() string { + c.RLock() + defer c.RUnlock() + return c.nodeID +} + +// URL returns the URL of this connection. +func (c *conn) URL() string { + c.RLock() + defer c.RUnlock() + return c.url +} + +// IsDead returns true if this connection is marked as dead, i.e. a previous +// request to the URL has been unsuccessful. +func (c *conn) IsDead() bool { + c.RLock() + defer c.RUnlock() + return c.dead +} + +// MarkAsDead marks this connection as dead, increments the failures +// counter and stores the current time in dead since. +func (c *conn) MarkAsDead() { + c.Lock() + c.dead = true + if c.deadSince == nil { + utcNow := time.Now().UTC() + c.deadSince = &utcNow + } + c.failures += 1 + c.Unlock() +} + +// MarkAsAlive marks this connection as eligible to be returned from the +// pool of connections by the selector. +func (c *conn) MarkAsAlive() { + c.Lock() + c.dead = false + c.Unlock() +} + +// MarkAsHealthy marks this connection as healthy, i.e. a request has been +// successfully performed with it. 
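+// Unlike MarkAsAlive, it also resets the failure counter and clears the
+// deadSince timestamp.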
+func (c *conn) MarkAsHealthy() { + c.Lock() + c.dead = false + c.deadSince = nil + c.failures = 0 + c.Unlock() +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/count.go b/services/templeton/vendor/src/github.com/olivere/elastic/count.go new file mode 100644 index 000000000..ebc878b2d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/count.go @@ -0,0 +1,310 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// CountService is a convenient service for determining the +// number of documents in an index. Use SearchService with +// a SearchType of count for counting with queries etc. +type CountService struct { + client *Client + pretty bool + index []string + typ []string + allowNoIndices *bool + analyzeWildcard *bool + analyzer string + defaultOperator string + df string + expandWildcards string + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + minScore interface{} + preference string + q string + query Query + routing string + bodyJson interface{} + bodyString string +} + +// NewCountService creates a new CountService. +func NewCountService(client *Client) *CountService { + return &CountService{ + client: client, + } +} + +// Index sets the names of the indices to restrict the results. +func (s *CountService) Index(index ...string) *CountService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// Type sets the types to use to restrict the results. +func (s *CountService) Type(typ ...string) *CountService { + if s.typ == nil { + s.typ = make([]string, 0) + } + s.typ = append(s.typ, typ...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes "_all" string +// or when no indices have been specified). +func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService { + s.allowNoIndices = &allowNoIndices + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *CountService) Analyzer(analyzer string) *CountService { + s.analyzer = analyzer + return s +} + +// DefaultOperator specifies the default operator for query string query (AND or OR). +func (s *CountService) DefaultOperator(defaultOperator string) *CountService { + s.defaultOperator = defaultOperator + return s +} + +// Df specifies the field to use as default where no field prefix is given +// in the query string. +func (s *CountService) Df(df string) *CountService { + s.df = df + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *CountService) ExpandWildcards(expandWildcards string) *CountService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). 
+func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Lenient specifies whether format-based query failures (such as +// providing text to a numeric field) should be ignored. +func (s *CountService) Lenient(lenient bool) *CountService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// MinScore indicates to include only documents with a specific `_score` +// value in the result. +func (s *CountService) MinScore(minScore interface{}) *CountService { + s.minScore = minScore + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *CountService) Preference(preference string) *CountService { + s.preference = preference + return s +} + +// Q in the Lucene query string syntax. You can also use Query to pass +// a Query struct. +func (s *CountService) Q(q string) *CountService { + s.q = q + return s +} + +// Query specifies the query to pass. You can also pass a query string with Q. +func (s *CountService) Query(query Query) *CountService { + s.query = query + return s +} + +// Routing specifies the routing value. +func (s *CountService) Routing(routing string) *CountService { + s.routing = routing + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *CountService) Pretty(pretty bool) *CountService { + s.pretty = pretty + return s +} + +// BodyJson specifies the query to restrict the results specified with the +// Query DSL (optional). The interface{} will be serialized to a JSON document, +// so use a map[string]interface{}. +func (s *CountService) BodyJson(body interface{}) *CountService { + s.bodyJson = body + return s +} + +// Body specifies a query to restrict the results specified with +// the Query DSL (optional). +func (s *CountService) BodyString(body string) *CountService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
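+// Depending on which of index and type are set, the resulting path is one of
+// /{index}/{type}/_count, /{index}/_count, /_all/{type}/_count, or /_all/_count.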
+func (s *CountService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_all/_count" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.minScore != nil { + params.Set("min_score", fmt.Sprintf("%v", s.minScore)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *CountService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *CountService) Do() (int64, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return 0, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return 0, err + } + + // Setup HTTP request body + var body interface{} + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return 0, err + } + query := make(map[string]interface{}) + query["query"] = src + body = query + } else if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return 0, err + } + + // Return result + ret := new(CountResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return 0, err + } + if ret != nil { + return ret.Count, nil + } + + return int64(0), nil +} + +// CountResponse is the response of using the Count API. +type CountResponse struct { + Count int64 `json:"count"` + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/count_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/count_test.go new file mode 100644 index 000000000..bfc2a2955 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/count_test.go @@ -0,0 +1,124 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestCountURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Expected string + }{ + { + []string{}, + []string{}, + "/_all/_count", + }, + { + []string{}, + []string{"tweet"}, + "/_all/tweet/_count", + }, + { + []string{"twitter-*"}, + []string{"tweet", "follower"}, + "/twitter-%2A/tweet%2Cfollower/_count", + }, + { + []string{"twitter-2014", "twitter-2015"}, + []string{"tweet", "follower"}, + "/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count", + }, + } + + for _, test := range tests { + path, _, err := client.Count().Index(test.Indices...).Type(test.Types...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestCount(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Count documents + count, err = client.Count(testIndexName).Type("tweet").Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Count documents + count, err = client.Count(testIndexName).Type("gezwitscher").Do() + if err != nil { + t.Fatal(err) + } + if count != 0 { + t.Errorf("expected Count = %d; got %d", 0, count) + } + + // Count with query + query := NewTermQuery("user", "olivere") + count, err = client.Count(testIndexName).Query(query).Do() + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } + + // Count with query and type + query = NewTermQuery("user", "olivere") + count, err = client.Count(testIndexName).Type("tweet").Query(query).Do() + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/decoder.go b/services/templeton/vendor/src/github.com/olivere/elastic/decoder.go new file mode 100644 index 000000000..765a5be30 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/decoder.go @@ -0,0 +1,26 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// Decoder is used to decode responses from Elasticsearch. +// Users of elastic can implement their own marshaler for advanced purposes +// and set them per Client (see SetDecoder). 
If none is specified, +// DefaultDecoder is used. +type Decoder interface { + Decode(data []byte, v interface{}) error +} + +// DefaultDecoder uses json.Unmarshal from the Go standard library +// to decode JSON data. +type DefaultDecoder struct{} + +// Decode decodes with json.Unmarshal from the Go standard library. +func (u *DefaultDecoder) Decode(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/decoder_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/decoder_test.go new file mode 100644 index 000000000..5cfce9f5d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/decoder_test.go @@ -0,0 +1,49 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" + "sync/atomic" + "testing" +) + +type decoder struct { + dec json.Decoder + + N int64 +} + +func (d *decoder) Decode(data []byte, v interface{}) error { + atomic.AddInt64(&d.N, 1) + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + return dec.Decode(v) +} + +func TestDecoder(t *testing.T) { + dec := &decoder{} + client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0)) + + tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + if dec.N <= 0 { + t.Errorf("expected at least 1 call of decoder; got: %d", dec.N) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete.go new file mode 100644 index 000000000..dca135ee1 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/delete.go @@ -0,0 +1,214 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteService allows to delete a typed JSON document from a specified +// index based on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html +// for details. +type DeleteService struct { + client *Client + pretty bool + id string + index string + typ string + routing string + timeout string + version interface{} + versionType string + consistency string + parent string + refresh *bool + replication string +} + +// NewDeleteService creates a new DeleteService. +func NewDeleteService(client *Client) *DeleteService { + return &DeleteService{ + client: client, + } +} + +// Type is the type of the document. +func (s *DeleteService) Type(typ string) *DeleteService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *DeleteService) Id(id string) *DeleteService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *DeleteService) Index(index string) *DeleteService { + s.index = index + return s +} + +// Replication specifies a replication type. 
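+// Judging from DeleteByQueryService.Replication later in this patch, the
+// expected values are "sync" and "async".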
+func (s *DeleteService) Replication(replication string) *DeleteService { + s.replication = replication + return s +} + +// Routing is a specific routing value. +func (s *DeleteService) Routing(routing string) *DeleteService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *DeleteService) Timeout(timeout string) *DeleteService { + s.timeout = timeout + return s +} + +// Version is an explicit version number for concurrency control. +func (s *DeleteService) Version(version interface{}) *DeleteService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *DeleteService) VersionType(versionType string) *DeleteService { + s.versionType = versionType + return s +} + +// Consistency defines a specific write consistency setting for the operation. +func (s *DeleteService) Consistency(consistency string) *DeleteService { + s.consistency = consistency + return s +} + +// Parent is the ID of parent document. +func (s *DeleteService) Parent(parent string) *DeleteService { + s.parent = parent + return s +} + +// Refresh the index after performing the operation. +func (s *DeleteService) Refresh(refresh bool) *DeleteService { + s.refresh = &refresh + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *DeleteService) Pretty(pretty bool) *DeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "index": s.index, + "type": s.typ, + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.replication != "" { + params.Set("replication", s.replication) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteService) Validate() error { + var invalid []string + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *DeleteService) Do() (*DeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete request. + +// DeleteResponse is the outcome of running DeleteService.Do. 
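+// Found reports whether the document existed before the delete; the
+// TestDelete function later in this patch asserts on it.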
+type DeleteResponse struct { + // TODO _shards { total, failed, successful } + Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int64 `json:"_version"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query.go new file mode 100644 index 000000000..3db9c0ce8 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query.go @@ -0,0 +1,302 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteByQueryService deletes documents that match a query. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html. +type DeleteByQueryService struct { + client *Client + indices []string + types []string + analyzer string + consistency string + defaultOper string + df string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + replication string + routing string + timeout string + pretty bool + q string + query Query +} + +// NewDeleteByQueryService creates a new DeleteByQueryService. +// You typically use the client's DeleteByQuery to get a reference to +// the service. +func NewDeleteByQueryService(client *Client) *DeleteByQueryService { + builder := &DeleteByQueryService{ + client: client, + } + return builder +} + +// Index sets the indices on which to perform the delete operation. +func (s *DeleteByQueryService) Index(indices ...string) *DeleteByQueryService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +// Type limits the delete operation to the given types. +func (s *DeleteByQueryService) Type(types ...string) *DeleteByQueryService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Analyzer to use for the query string. +func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService { + s.analyzer = analyzer + return s +} + +// Consistency represents the specific write consistency setting for the operation. +// It can be one, quorum, or all. +func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService { + s.consistency = consistency + return s +} + +// DefaultOperator for query string query (AND or OR). +func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService { + s.defaultOper = defaultOperator + return s +} + +// DF is the field to use as default where no field prefix is given in the query string. +func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// DefaultField is the field to use as default where no field prefix is given in the query string. +// It is an alias to the DF func. +func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). 
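+// Like the CountService equivalent, it maps to the ignore_unavailable
+// query string parameter.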
+func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService { + s.ignoreUnavailable = &ignore + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices (including the _all string +// or when no indices have been specified). +func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { + s.allowNoIndices = &allow + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. It can be "open" or "closed". +func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { + s.expandWildcards = expand + return s +} + +// Replication sets a specific replication type (sync or async). +func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService { + s.replication = replication + return s +} + +// Q specifies the query in Lucene query string syntax. You can also use +// Query to programmatically specify the query. +func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService { + s.q = query + return s +} + +// QueryString is an alias to Q. Notice that you can also use Query to +// programmatically set the query. +func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService { + s.q = query + return s +} + +// Routing sets a specific routing value. +func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService { + s.routing = routing + return s +} + +// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms". +func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService { + s.timeout = timeout + return s +} + +// Pretty indents the JSON output from Elasticsearch. +func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { + s.pretty = pretty + return s +} + +// Query sets the query programmatically. +func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { + s.query = query + return s +} + +// Do executes the delete-by-query operation. 
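+// It expands indices and types into a path of the form
+// /{indices}/{types}/_query and issues a DELETE request, sending the query
+// (if one was set with Query) as the request body.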
+func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) { + var err error + + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err = uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + // Types part + typesPart := make([]string, 0) + for _, typ := range s.types { + typ, err = uritemplates.Expand("{type}", map[string]string{ + "type": typ, + }) + if err != nil { + return nil, err + } + typesPart = append(typesPart, typ) + } + if len(typesPart) > 0 { + path += "/" + strings.Join(typesPart, ",") + } + + // Search + path += "/_query" + + // Parameters + params := make(url.Values) + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.defaultOper != "" { + params.Set("default_operator", s.defaultOper) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.replication != "" { + params.Set("replication", s.replication) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.q != "" { + params.Set("q", s.q) + } + + // Set body if there is a query set + var body interface{} + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + query := make(map[string]interface{}) + query["query"] = src + body = query + } + + // Get response + res, err := s.client.PerformRequest("DELETE", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(DeleteByQueryResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService. +type DeleteByQueryResult struct { + Took int64 `json:"took"` + TimedOut bool `json:"timed_out"` + Indices map[string]IndexDeleteByQueryResult `json:"_indices"` + Failures []shardOperationFailure `json:"failures"` +} + +// IndexNames returns the names of the indices the DeleteByQuery touched. +func (res DeleteByQueryResult) IndexNames() []string { + var indices []string + for index, _ := range res.Indices { + indices = append(indices, index) + } + return indices +} + +// All returns the index delete-by-query result of all indices. +func (res DeleteByQueryResult) All() IndexDeleteByQueryResult { + all, _ := res.Indices["_all"] + return all +} + +// IndexDeleteByQueryResult is the result of a delete-by-query for a specific +// index. +type IndexDeleteByQueryResult struct { + // Found documents, matching the query. + Found int `json:"found"` + // Deleted documents, successfully, from the given index. + Deleted int `json:"deleted"` + // Missing documents when trying to delete them. + Missing int `json:"missing"` + // Failed documents to be deleted for the given index. 
+ Failed int `json:"failed"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query_test.go new file mode 100644 index 000000000..71b786f6e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query_test.go @@ -0,0 +1,114 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestDeleteByQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + found, err := client.HasPlugin("delete-by-query") + if err != nil { + t.Fatal(err) + } + if !found { + t.Skip("DeleteByQuery in 2.0 is now a plugin (delete-by-query) and must be " + + "loaded in the configuration") + } + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Fatalf("expected count = %d; got: %d", 3, count) + } + + // Delete all documents by sandrae + q := NewTermQuery("user", "sandrae") + res, err := client.DeleteByQuery().Index(testIndexName).Type("tweet").Query(q).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected response != nil; got: %v", res) + } + + // Check response + if got, want := len(res.IndexNames()), 2; got != want { + t.Fatalf("expected %d indices; got: %d", want, got) + } + idx, found := res.Indices["_all"] + if !found { + t.Fatalf("expected to find index %q", "_all") + } + if got, want := idx.Found, 1; got != want { + t.Fatalf("expected Found = %v; got: %v", want, got) + } + if got, want := idx.Deleted, 1; got != want { + t.Fatalf("expected Deleted = %v; got: %v", want, got) + } + if got, want := idx.Missing, 0; got != want { + t.Fatalf("expected Missing = %v; got: %v", want, got) + } + if got, want := idx.Failed, 0; got != want { + t.Fatalf("expected Failed = %v; got: %v", want, got) + } + idx, found = res.Indices[testIndexName] + if !found { + t.Errorf("expected Found = true; got: %v", found) + } + if got, want := idx.Found, 1; got != want { + t.Fatalf("expected Found = %v; got: %v", want, got) + } + if got, want := idx.Deleted, 1; got != want { + t.Fatalf("expected Deleted = %v; got: %v", want, got) + } + if got, want := idx.Missing, 0; got != want { + t.Fatalf("expected Missing = %v; got: %v", want, got) + } + if got, want := idx.Failed, 0; got != want { + t.Fatalf("expected Failed = %v; got: %v", want, got) + } + + // Flush and check count + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err = client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if 
count != 2 { + t.Fatalf("expected Count = %d; got: %d", 2, count) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_template.go new file mode 100644 index 000000000..b8d0223f6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/delete_template.go @@ -0,0 +1,118 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteTemplateService deletes a search template. More information can +// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type DeleteTemplateService struct { + client *Client + pretty bool + id string + version *int + versionType string +} + +// NewDeleteTemplateService creates a new DeleteTemplateService. +func NewDeleteTemplateService(client *Client) *DeleteTemplateService { + return &DeleteTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService { + s.id = id + return s +} + +// Version an explicit version number for concurrency control. +func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService { + s.version = &version + return s +} + +// VersionType specifies a version type. +func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// DeleteTemplateResponse is the response of DeleteTemplateService.Do. 
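+// Found indicates whether the template existed before deletion.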
+type DeleteTemplateResponse struct { + Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_template_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_template_test.go new file mode 100644 index 000000000..85bb7ad55 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/delete_template_test.go @@ -0,0 +1,22 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestDeleteTemplateValidate(t *testing.T) { + client := setupTestClient(t) + + // No template id -> fail with error + res, err := NewDeleteTemplateService(client).Do() + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_test.go new file mode 100644 index 000000000..418fdec7d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/delete_test.go @@ -0,0 +1,118 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestDelete(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Delete document 1 + res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err = client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } + + // Delete non existent document 99 + res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh(true).Do() + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + if !IsNotFound(err) { + t.Errorf("expected NotFound error; got %v", err) + } + if res != nil { + t.Fatalf("expected no response; got: %v", res) + } + + count, err = client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 2 { 
+ t.Errorf("expected Count = %d; got %d", 2, count) + } +} + +func TestDeleteValidate(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + // No index name -> fail with error + res, err := NewDeleteService(client).Type("tweet").Id("1").Do() + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } + + // No type -> fail with error + res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do() + if err == nil { + t.Fatalf("expected Delete to fail without type") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } + + // No id -> fail with error + res, err = NewDeleteService(client).Index(testIndexName).Type("tweet").Do() + if err == nil { + t.Fatalf("expected Delete to fail without id") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/doc.go b/services/templeton/vendor/src/github.com/olivere/elastic/doc.go new file mode 100644 index 000000000..336a734de --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/doc.go @@ -0,0 +1,51 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +/* +Package elastic provides an interface to the Elasticsearch server +(http://www.elasticsearch.org/). + +The first thing you do is to create a Client. If you have Elasticsearch +installed and running with its default settings +(i.e. available at http://127.0.0.1:9200), all you need to do is: + + client, err := elastic.NewClient() + if err != nil { + // Handle error + } + +If your Elasticsearch server is running on a different IP and/or port, +just provide a URL to NewClient: + + // Create a client and connect to http://192.168.2.10:9201 + client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201")) + if err != nil { + // Handle error + } + +You can pass many more configuration parameters to NewClient. Review the +documentation of NewClient for more information. + +If no Elasticsearch server is available, services will fail when creating +a new request and will return ErrNoClient. + +A Client provides services. The services usually come with a variety of +methods to prepare the query and a Do function to execute it against the +Elasticsearch REST interface and return a response. Here is an example +of the IndexExists service that checks if a given index already exists. + + exists, err := client.IndexExists("twitter").Do() + if err != nil { + // Handle error + } + if !exists { + // Index does not exist yet. + } + +Look up the documentation for Client to get an idea of the services provided +and what kinds of responses you get when executing the Do function of a service. +Also see the wiki on Github for more details. + +*/ +package elastic diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/errors.go b/services/templeton/vendor/src/github.com/olivere/elastic/errors.go new file mode 100644 index 000000000..93c2c6de5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/errors.go @@ -0,0 +1,141 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+// checkResponse will return an error if the request/response indicates
+// an error returned from Elasticsearch.
+//
+// HTTP status codes in the range [200..299] are considered successful.
+// All other status codes are treated as errors unless they are listed in
+// ignoreErrors. This is necessary because for some services, HTTP status 404
+// is a valid response from Elasticsearch (e.g. the Exists service).
+//
+// The function tries to parse error details as returned from Elasticsearch
+// and encapsulates them in type elastic.Error.
+func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error {
+	// 200-299 are valid status codes
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	// Ignore certain errors?
+	for _, code := range ignoreErrors {
+		if code == res.StatusCode {
+			return nil
+		}
+	}
+	return createResponseError(res)
+}
+
+// createResponseError creates an Error structure from the HTTP response,
+// its status code and the error information sent by Elasticsearch.
+func createResponseError(res *http.Response) error {
+	if res.Body == nil {
+		return &Error{Status: res.StatusCode}
+	}
+	data, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return &Error{Status: res.StatusCode}
+	}
+	errReply := new(Error)
+	err = json.Unmarshal(data, errReply)
+	if err != nil {
+		return &Error{Status: res.StatusCode}
+	}
+	if errReply != nil {
+		if errReply.Status == 0 {
+			errReply.Status = res.StatusCode
+		}
+		return errReply
+	}
+	return &Error{Status: res.StatusCode}
+}
+
+// Error encapsulates error details as returned from Elasticsearch.
+type Error struct {
+	Status  int           `json:"status"`
+	Details *ErrorDetails `json:"error,omitempty"`
+}
+
+// ErrorDetails encapsulate error details from Elasticsearch.
+// It is used in e.g. elastic.Error and elastic.BulkResponseItem.
+type ErrorDetails struct {
+	Type         string                   `json:"type"`
+	Reason       string                   `json:"reason"`
+	ResourceType string                   `json:"resource.type,omitempty"`
+	ResourceId   string                   `json:"resource.id,omitempty"`
+	Index        string                   `json:"index,omitempty"`
+	Phase        string                   `json:"phase,omitempty"`
+	Grouped      bool                     `json:"grouped,omitempty"`
+	CausedBy     map[string]interface{}   `json:"caused_by,omitempty"`
+	RootCause    []*ErrorDetails          `json:"root_cause,omitempty"`
+	FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
+}
+
+// Error returns a string representation of the error.
+func (e *Error) Error() string {
+	if e.Details != nil && e.Details.Reason != "" {
+		return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type)
+	} else {
+		return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
+	}
+}
+
+// IsNotFound returns true if the given error indicates that Elasticsearch
+// returned HTTP status 404. The err parameter can be of type *elastic.Error,
+// elastic.Error, *http.Response or int (indicating the HTTP status code).
+func IsNotFound(err interface{}) bool {
+	switch e := err.(type) {
+	case *http.Response:
+		return e.StatusCode == http.StatusNotFound
+	case *Error:
+		return e.Status == http.StatusNotFound
+	case Error:
+		return e.Status == http.StatusNotFound
+	case int:
+		return e == http.StatusNotFound
+	}
+	return false
+}
+
+// IsTimeout returns true if the given error indicates that Elasticsearch
+// returned HTTP status 408. 
The err parameter can be of type *elastic.Error, +// elastic.Error, *http.Response or int (indicating the HTTP status code). +func IsTimeout(err interface{}) bool { + switch e := err.(type) { + case *http.Response: + return e.StatusCode == http.StatusRequestTimeout + case *Error: + return e.Status == http.StatusRequestTimeout + case Error: + return e.Status == http.StatusRequestTimeout + case int: + return e == http.StatusRequestTimeout + } + return false +} + +// -- General errors -- + +// shardsInfo represents information from a shard. +type shardsInfo struct { + Total int `json:"total"` + Successful int `json:"successful"` + Failed int `json:"failed"` +} + +// shardOperationFailure represents a shard failure. +type shardOperationFailure struct { + Shard int `json:"shard"` + Index string `json:"index"` + Status string `json:"status"` + // "reason" +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/errors_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/errors_test.go new file mode 100644 index 000000000..c33dc2d6d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/errors_test.go @@ -0,0 +1,202 @@ +package elastic + +import ( + "bufio" + "fmt" + "net/http" + "strings" + "testing" +) + +func TestResponseError(t *testing.T) { + raw := "HTTP/1.1 404 Not Found\r\n" + + "\r\n" + + `{"error":{"root_cause":[{"type":"index_missing_exception","reason":"no such index","index":"elastic-test"}],"type":"index_missing_exception","reason":"no such index","index":"elastic-test"},"status":404}` + "\r\n" + r := bufio.NewReader(strings.NewReader(raw)) + + req, err := http.NewRequest("GET", "/", nil) + if err != nil { + t.Fatal(err) + } + + resp, err := http.ReadResponse(r, nil) + if err != nil { + t.Fatal(err) + } + err = checkResponse(req, resp) + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + + // Check for correct error message + expected := fmt.Sprintf("elastic: Error %d (%s): no such index [type=index_missing_exception]", resp.StatusCode, http.StatusText(resp.StatusCode)) + got := err.Error() + if got != expected { + t.Fatalf("expected %q; got: %q", expected, got) + } + + // Check that error is of type *elastic.Error, which contains additional information + e, ok := err.(*Error) + if !ok { + t.Fatal("expected error to be of type *elastic.Error") + } + if e.Status != resp.StatusCode { + t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status) + } + if e.Details == nil { + t.Fatalf("expected error details; got: %v", e.Details) + } + if got, want := e.Details.Index, "elastic-test"; got != want { + t.Fatalf("expected error details index %q; got: %q", want, got) + } + if got, want := e.Details.Type, "index_missing_exception"; got != want { + t.Fatalf("expected error details type %q; got: %q", want, got) + } + if got, want := e.Details.Reason, "no such index"; got != want { + t.Fatalf("expected error details reason %q; got: %q", want, got) + } + if got, want := len(e.Details.RootCause), 1; got != want { + t.Fatalf("expected %d error details root causes; got: %d", want, got) + } + + if got, want := e.Details.RootCause[0].Index, "elastic-test"; got != want { + t.Fatalf("expected root cause index %q; got: %q", want, got) + } + if got, want := e.Details.RootCause[0].Type, "index_missing_exception"; got != want { + t.Fatalf("expected root cause type %q; got: %q", want, got) + } + if got, want := e.Details.RootCause[0].Reason, "no such index"; got != want { + t.Fatalf("expected root cause reason %q; got: %q", want, 
got)
+	}
+}
+
+func TestResponseErrorHTML(t *testing.T) {
+	raw := "HTTP/1.1 413 Request Entity Too Large\r\n" +
+		"\r\n" +
+		`<html>
+<head><title>413 Request Entity Too Large</title></head>
+<body bgcolor="white">
+<center><h1>413 Request Entity Too Large</h1></center>
+<hr><center>nginx/1.6.2</center>
+</body>
+</html>
+` + "\r\n"
+	r := bufio.NewReader(strings.NewReader(raw))
+
+	req, err := http.NewRequest("GET", "/", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := http.ReadResponse(r, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = checkResponse(req, resp)
+	if err == nil {
+		t.Fatalf("expected error; got: %v", err)
+	}
+
+	// Check for correct error message
+	expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge))
+	got := err.Error()
+	if got != expected {
+		t.Fatalf("expected %q; got: %q", expected, got)
+	}
+}
+
+func TestResponseErrorWithIgnore(t *testing.T) {
+	raw := "HTTP/1.1 404 Not Found\r\n" +
+		"\r\n" +
+		`{"some":"response"}` + "\r\n"
+	r := bufio.NewReader(strings.NewReader(raw))
+
+	req, err := http.NewRequest("HEAD", "/", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := http.ReadResponse(r, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = checkResponse(req, resp)
+	if err == nil {
+		t.Fatalf("expected error; got: %v", err)
+	}
+	err = checkResponse(req, resp, 404) // ignore 404 errors
+	if err != nil {
+		t.Fatalf("expected no error; got: %v", err)
+	}
+}
+
+func TestIsNotFound(t *testing.T) {
+	if got, want := IsNotFound(nil), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(""), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(200), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(404), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsNotFound(&Error{Status: 404}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(&Error{Status: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsNotFound(Error{Status: 404}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(Error{Status: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsNotFound(&http.Response{StatusCode: 404}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(&http.Response{StatusCode: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+}
+
+func TestIsTimeout(t *testing.T) {
+	if got, want := IsTimeout(nil), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(""), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(200), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(408), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsTimeout(&Error{Status: 408}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(&Error{Status: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsTimeout(Error{Status: 408}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(Error{Status: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsTimeout(&http.Response{StatusCode: 408}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(&http.Response{StatusCode: 200}), false; got != 
want { + t.Errorf("expected %v; got: %v", want, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/example_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/example_test.go new file mode 100644 index 000000000..8fc03ec1a --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/example_test.go @@ -0,0 +1,547 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic_test + +import ( + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "time" + + "gopkg.in/olivere/elastic.v3" +) + +type Tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *elastic.SuggestField `json:"suggest_field,omitempty"` +} + +func Example() { + errorlog := log.New(os.Stdout, "APP ", log.LstdFlags) + + // Obtain a client. You can provide your own HTTP client here. + client, err := elastic.NewClient(elastic.SetErrorLog(errorlog)) + if err != nil { + // Handle error + panic(err) + } + + // Trace request and response details like this + //client.SetTracer(log.New(os.Stdout, "", 0)) + + // Ping the Elasticsearch server to get e.g. the version number + info, code, err := client.Ping("http://127.0.0.1:9200").Do() + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number) + + // Getting the ES version number is quite common, so there's a shortcut + esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200") + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Elasticsearch version %s", esversion) + + // Use the IndexExists service to check if a specified index exists. + exists, err := client.IndexExists("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !exists { + // Create a new index. + createIndex, err := client.CreateIndex("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !createIndex.Acknowledged { + // Not acknowledged + } + } + + // Index a tweet (using JSON serialization) + tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0} + put1, err := client.Index(). + Index("twitter"). + Type("tweet"). + Id("1"). + BodyJson(tweet1). + Do() + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type) + + // Index a second tweet (by string) + tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}` + put2, err := client.Index(). + Index("twitter"). + Type("tweet"). + Id("2"). + BodyString(tweet2). + Do() + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type) + + // Get tweet with specified ID + get1, err := client.Get(). + Index("twitter"). + Type("tweet"). + Id("1"). + Do() + if err != nil { + // Handle error + panic(err) + } + if get1.Found { + fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type) + } + + // Flush to make sure the documents got written. 
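+	// (The explicit flush mirrors the library's own tests; it is not
+	// something production code usually needs after every index call.)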
+ _, err = client.Flush().Index("twitter").Do() + if err != nil { + panic(err) + } + + // Search with a term query + termQuery := elastic.NewTermQuery("user", "olivere") + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute + if err != nil { + // Handle error + panic(err) + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. + fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Each is a convenience function that iterates over hits in a search result. + // It makes sure you don't need to check for nil values in the response. + // However, it ignores errors in serialization. If you want full control + // over iterating the hits, see below. + var ttyp Tweet + for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { + t := item.(Tweet) + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + // TotalHits is another convenience function that works even when something goes wrong. + fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) + + // Here's how you iterate through results with full control over each step. + if searchResult.Hits != nil { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + fmt.Print("Found no tweets\n") + } + + // Update a tweet by the update API of Elasticsearch. + // We just increment the number of retweets. + script := elastic.NewScript("ctx._source.retweets += num").Param("num", 1) + update, err := client.Update().Index("twitter").Type("tweet").Id("1"). + Script(script). + Upsert(map[string]interface{}{"retweets": 0}). + Do() + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version) + + // ... + + // Delete an index. + deleteIndex, err := client.DeleteIndex("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !deleteIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleClient_NewClient_default() { + // Obtain a client to the Elasticsearch instance on http://127.0.0.1:9200. + client, err := elastic.NewClient() + if err != nil { + // Handle error + fmt.Printf("connection failed: %v\n", err) + } else { + fmt.Println("connected") + } + _ = client + // Output: + // connected +} + +func ExampleClient_NewClient_cluster() { + // Obtain a client for an Elasticsearch cluster of two nodes, + // running on 10.0.1.1 and 10.0.1.2. + client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200")) + if err != nil { + // Handle error + panic(err) + } + _ = client +} + +func ExampleClient_NewClient_manyOptions() { + // Obtain a client for an Elasticsearch cluster of two nodes, + // running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer. + // Set the healthcheck interval to 10s. When requests fail, + // retry 5 times. 
Print error messages to os.Stderr and informational + // messages to os.Stdout. + client, err := elastic.NewClient( + elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"), + elastic.SetSniff(false), + elastic.SetHealthcheckInterval(10*time.Second), + elastic.SetMaxRetries(5), + elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)), + elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags))) + if err != nil { + // Handle error + panic(err) + } + _ = client +} + +func ExampleIndexExistsService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Use the IndexExists service to check if the index "twitter" exists. + exists, err := client.IndexExists("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if exists { + // ... + } +} + +func ExampleCreateIndexService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Create a new index. + createIndex, err := client.CreateIndex("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !createIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleDeleteIndexService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Delete an index. + deleteIndex, err := client.DeleteIndex("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !deleteIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleSearchService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + + // Search with a term query + termQuery := elastic.NewTermQuery("user", "olivere") + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute + if err != nil { + // Handle error + panic(err) + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. + fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Number of hits + if searchResult.Hits != nil { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + fmt.Print("Found no tweets\n") + } +} + +func ExampleAggregations() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + + // Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year). 
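+	// The aggregation JSON this builds is roughly the following (shape
+	// assumed here for illustration, not taken from the library docs):
+	//
+	//   "aggs": {
+	//     "timeline": {
+	//       "terms": { "field": "user", "size": 10, "order": { "_count": "desc" } },
+	//       "aggs": {
+	//         "history": { "date_histogram": { "field": "created", "interval": "year" } }
+	//       }
+	//     }
+	//   }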
+ timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+ histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
+ timeline = timeline.SubAggregation("history", histogram)
+
+ // Search with a term query
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(elastic.NewMatchAllQuery()). // return all results, but ...
+ SearchType("count"). // ... do not return hits, just the count
+ Aggregation("timeline", timeline). // add our aggregation to the query
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Access "timeline" aggregate in search result.
+ agg, found := searchResult.Aggregations.Terms("timeline")
+ if !found {
+ log.Fatalf("we should have a terms aggregation called %q", "timeline")
+ }
+ for _, userBucket := range agg.Buckets {
+ // Every bucket should have the user field as key.
+ user := userBucket.Key
+
+ // The sub-aggregation history should have the number of tweets per year.
+ histogram, found := userBucket.DateHistogram("history")
+ if found {
+ for _, year := range histogram.Buckets {
+ fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
+ }
+ }
+ }
+}
+
+func ExampleSearchResult() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Do a search
+ searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do()
+ if err != nil {
+ panic(err)
+ }
+
+ // searchResult is of type SearchResult and returns hits, suggestions,
+ // and all kinds of other information from Elasticsearch.
+ fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Each is a utility function that iterates over hits in a search result.
+ // It makes sure you don't need to check for nil values in the response.
+ // However, it ignores errors in serialization. If you want full control
+ // over iterating the hits, see below.
+ var ttyp Tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+ t := item.(Tweet)
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+ // Here's how you iterate hits with full control.
+ if searchResult.Hits != nil {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ } else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+ }
+}
+
+func ExamplePutTemplateService() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Create search template
+ tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
+
+ // Create template
+ resp, err := client.PutTemplate().
+ Id("my-search-template"). // Name of the template
+ BodyString(tmpl). 
// Search template itself + Do() // Execute + if err != nil { + panic(err) + } + if resp.Created { + fmt.Println("search template created") + } +} + +func ExampleGetTemplateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get template stored under "my-search-template" + resp, err := client.GetTemplate().Id("my-search-template").Do() + if err != nil { + panic(err) + } + fmt.Printf("search template is: %q\n", resp.Template) +} + +func ExampleDeleteTemplateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Delete template + resp, err := client.DeleteTemplate().Id("my-search-template").Do() + if err != nil { + panic(err) + } + if resp != nil && resp.Found { + fmt.Println("template deleted") + } +} + +func ExampleClusterHealthService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get cluster health + res, err := client.ClusterHealth().Index("twitter").Do() + if err != nil { + panic(err) + } + if res == nil { + panic(err) + } + fmt.Printf("Cluster status is %q\n", res.Status) +} + +func ExampleClusterHealthService_WaitForGreen() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Wait for status green + res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do() + if err != nil { + panic(err) + } + if res.TimedOut { + fmt.Printf("time out waiting for cluster status %q\n", "green") + } else { + fmt.Printf("cluster status is %q\n", res.Status) + } +} + +func ExampleClusterStateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get cluster state + res, err := client.ClusterState().Metric("version").Do() + if err != nil { + panic(err) + } + fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/exists.go b/services/templeton/vendor/src/github.com/olivere/elastic/exists.go new file mode 100644 index 000000000..7a42d53c9 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/exists.go @@ -0,0 +1,175 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ExistsService checks for the existence of a document using HEAD. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// for details. +type ExistsService struct { + client *Client + pretty bool + id string + index string + typ string + preference string + realtime *bool + refresh *bool + routing string + parent string +} + +// NewExistsService creates a new ExistsService. +func NewExistsService(client *Client) *ExistsService { + return &ExistsService{ + client: client, + } +} + +// Id is the document ID. +func (s *ExistsService) Id(id string) *ExistsService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *ExistsService) Index(index string) *ExistsService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *ExistsService) Type(typ string) *ExistsService { + s.typ = typ + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). 
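+// A short usage sketch ("_primary" is a standard Elasticsearch preference
+// value; the index, type, and id are illustrative):
+//
+//     exists, err := client.Exists().Index("twitter").Type("tweet").Id("1").
+//         Preference("_primary").
+//         Do()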
+func (s *ExistsService) Preference(preference string) *ExistsService { + s.preference = preference + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *ExistsService) Realtime(realtime bool) *ExistsService { + s.realtime = &realtime + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *ExistsService) Refresh(refresh bool) *ExistsService { + s.refresh = &refresh + return s +} + +// Routing is a specific routing value. +func (s *ExistsService) Routing(routing string) *ExistsService { + s.routing = routing + return s +} + +// Parent is the ID of the parent document. +func (s *ExistsService) Parent(parent string) *ExistsService { + s.parent = parent + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExistsService) Pretty(pretty bool) *ExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExistsService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExistsService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/exists_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/exists_test.go new file mode 100644 index 000000000..58a4fe707 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/exists_test.go @@ -0,0 +1,52 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestExists(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Fatal("expected document to exist")
+ }
+}
+
+func TestExistsValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index -> fail with error
+ res, err := NewExistsService(client).Type("tweet").Id("1").Do()
+ if err == nil {
+ t.Fatalf("expected Exists to fail without index name")
+ }
+ if res != false {
+ t.Fatalf("expected result to be false; got: %v", res)
+ }
+
+ // No type -> fail with error
+ res, err = NewExistsService(client).Index(testIndexName).Id("1").Do()
+ if err == nil {
+ t.Fatalf("expected Exists to fail without type")
+ }
+ if res != false {
+ t.Fatalf("expected result to be false; got: %v", res)
+ }
+
+ // No id -> fail with error
+ res, err = NewExistsService(client).Index(testIndexName).Type("tweet").Do()
+ if err == nil {
+ t.Fatalf("expected Exists to fail without id")
+ }
+ if res != false {
+ t.Fatalf("expected result to be false; got: %v", res)
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/explain.go b/services/templeton/vendor/src/github.com/olivere/elastic/explain.go
new file mode 100644
index 000000000..e922bc9b5
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/explain.go
@@ -0,0 +1,330 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// ExplainService computes a score explanation for a query and
+// a specific document.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
+type ExplainService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ q string
+ routing string
+ lenient *bool
+ analyzer string
+ df string
+ fields []string
+ lowercaseExpandedTerms *bool
+ xSourceInclude []string
+ analyzeWildcard *bool
+ parent string
+ preference string
+ xSource []string
+ defaultOperator string
+ xSourceExclude []string
+ source string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewExplainService creates a new ExplainService.
+func NewExplainService(client *Client) *ExplainService {
+ return &ExplainService{
+ client: client,
+ xSource: make([]string, 0),
+ xSourceExclude: make([]string, 0),
+ fields: make([]string, 0),
+ xSourceInclude: make([]string, 0),
+ }
+}
+
+// Id is the document ID.
+func (s *ExplainService) Id(id string) *ExplainService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *ExplainService) Index(index string) *ExplainService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document.
+func (s *ExplainService) Type(typ string) *ExplainService {
+ s.typ = typ
+ return s
+}
+
+// Source is the URL-encoded query definition (instead of using the request body).
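+// A hedged sketch (the query JSON is illustrative; the client URL-encodes the
+// "source" parameter when the request is sent):
+//
+//     expl, err := client.Explain("twitter", "tweet", "1").
+//         Source(`{"query":{"term":{"user":"olivere"}}}`).
+//         Do()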
+func (s *ExplainService) Source(source string) *ExplainService { + s.source = source + return s +} + +// XSourceExclude is a list of fields to exclude from the returned _source field. +func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. +func (s *ExplainService) Lenient(lenient bool) *ExplainService { + s.lenient = &lenient + return s +} + +// Query in the Lucene query string syntax. +func (s *ExplainService) Q(q string) *ExplainService { + s.q = q + return s +} + +// Routing sets a specific routing value. +func (s *ExplainService) Routing(routing string) *ExplainService { + s.routing = routing + return s +} + +// AnalyzeWildcard specifies whether wildcards and prefix queries +// in the query string query should be analyzed (default: false). +func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer is the analyzer for the query string query. +func (s *ExplainService) Analyzer(analyzer string) *ExplainService { + s.analyzer = analyzer + return s +} + +// Df is the default field for query string query (default: _all). +func (s *ExplainService) Df(df string) *ExplainService { + s.df = df + return s +} + +// Fields is a list of fields to return in the response. +func (s *ExplainService) Fields(fields ...string) *ExplainService { + s.fields = append(s.fields, fields...) + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// XSourceInclude is a list of fields to extract and return from the _source field. +func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// DefaultOperator is the default operator for query string query (AND or OR). +func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService { + s.defaultOperator = defaultOperator + return s +} + +// Parent is the ID of the parent document. +func (s *ExplainService) Parent(parent string) *ExplainService { + s.parent = parent + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *ExplainService) Preference(preference string) *ExplainService { + s.preference = preference + return s +} + +// XSource is true or false to return the _source field or not, or a list of fields to return. +func (s *ExplainService) XSource(xSource ...string) *ExplainService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExplainService) Pretty(pretty bool) *ExplainService { + s.pretty = pretty + return s +} + +// Query sets a query definition using the Query DSL. +func (s *ExplainService) Query(query Query) *ExplainService { + src, err := query.Source() + if err != nil { + // Do nothing in case of an error + return s + } + body := make(map[string]interface{}) + body["query"] = src + s.bodyJson = body + return s +} + +// BodyJson sets the query definition using the Query DSL. 
+func (s *ExplainService) BodyJson(body interface{}) *ExplainService { + s.bodyJson = body + return s +} + +// BodyString sets the query definition using the Query DSL as a string. +func (s *ExplainService) BodyString(body string) *ExplainService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *ExplainService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.source != "" { + params.Set("source", s.source) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.df != "" { + params.Set("df", s.df) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExplainService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExplainService) Do() (*ExplainResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ExplainResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ExplainResponse is the response of ExplainService.Do. 
+type ExplainResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Matched bool `json:"matched"` + Explanation map[string]interface{} `json:"explanation"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/explain_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/explain_test.go new file mode 100644 index 000000000..e799d6c52 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/explain_test.go @@ -0,0 +1,41 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestExplain(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Refresh(true). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // Explain + query := NewTermQuery("user", "olivere") + expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do() + if err != nil { + t.Fatal(err) + } + if expl == nil { + t.Fatal("expected to return an explanation") + } + if !expl.Matched { + t.Errorf("expected matched to be %v; got: %v", true, expl.Matched) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context.go b/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context.go new file mode 100644 index 000000000..e13c9eb47 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context.go @@ -0,0 +1,74 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/url" + "strings" +) + +type FetchSourceContext struct { + fetchSource bool + transformSource bool + includes []string + excludes []string +} + +func NewFetchSourceContext(fetchSource bool) *FetchSourceContext { + return &FetchSourceContext{ + fetchSource: fetchSource, + includes: make([]string, 0), + excludes: make([]string, 0), + } +} + +func (fsc *FetchSourceContext) FetchSource() bool { + return fsc.fetchSource +} + +func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) { + fsc.fetchSource = fetchSource +} + +func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext { + fsc.includes = append(fsc.includes, includes...) + return fsc +} + +func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext { + fsc.excludes = append(fsc.excludes, excludes...) + return fsc +} + +func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext { + fsc.transformSource = transformSource + return fsc +} + +func (fsc *FetchSourceContext) Source() (interface{}, error) { + if !fsc.fetchSource { + return false, nil + } + return map[string]interface{}{ + "includes": fsc.includes, + "excludes": fsc.excludes, + }, nil +} + +// Query returns the parameters in a form suitable for a URL query string. 
+func (fsc *FetchSourceContext) Query() url.Values { + params := url.Values{} + if !fsc.fetchSource { + params.Add("_source", "false") + return params + } + if len(fsc.includes) > 0 { + params.Add("_source_include", strings.Join(fsc.includes, ",")) + } + if len(fsc.excludes) > 0 { + params.Add("_source_exclude", strings.Join(fsc.excludes, ",")) + } + return params +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context_test.go new file mode 100644 index 000000000..2bb683d69 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context_test.go @@ -0,0 +1,125 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFetchSourceContextNoFetchSource(t *testing.T) { + builder := NewFetchSourceContext(false) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `false` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `false` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSource(t *testing.T) { + builder := NewFetchSourceContext(true) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":[],"includes":[]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSourceWithIncludesOnly(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":[],"includes":["a","b"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":["c"],"includes":["a","b"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextQueryDefaults(t *testing.T) { + builder := NewFetchSourceContext(true) + values := builder.Query() + got := values.Encode() + expected := "" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} + +func TestFetchSourceContextQueryNoFetchSource(t *testing.T) { + builder := 
NewFetchSourceContext(false) + values := builder.Query() + got := values.Encode() + expected := "_source=false" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} + +func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c") + values := builder.Query() + got := values.Encode() + expected := "_source_exclude=c&_source_include=a%2Cb" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/geo_point.go b/services/templeton/vendor/src/github.com/olivere/elastic/geo_point.go new file mode 100644 index 000000000..a09351ca2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/geo_point.go @@ -0,0 +1,48 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strconv" + "strings" +) + +// GeoPoint is a geographic position described via latitude and longitude. +type GeoPoint struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` +} + +// Source returns the object to be serialized in Elasticsearch DSL. +func (pt *GeoPoint) Source() map[string]float64 { + return map[string]float64{ + "lat": pt.Lat, + "lon": pt.Lon, + } +} + +// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude. +func GeoPointFromLatLon(lat, lon float64) *GeoPoint { + return &GeoPoint{Lat: lat, Lon: lon} +} + +// GeoPointFromString initializes a new GeoPoint by a string that is +// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091". +func GeoPointFromString(latLon string) (*GeoPoint, error) { + latlon := strings.SplitN(latLon, ",", 2) + if len(latlon) != 2 { + return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon) + } + lat, err := strconv.ParseFloat(latlon[0], 64) + if err != nil { + return nil, err + } + lon, err := strconv.ParseFloat(latlon[1], 64) + if err != nil { + return nil, err + } + return &GeoPoint{Lat: lat, Lon: lon}, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/geo_point_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/geo_point_test.go new file mode 100644 index 000000000..ebc28c2ec --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/geo_point_test.go @@ -0,0 +1,24 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoPointSource(t *testing.T) { + pt := GeoPoint{Lat: 40, Lon: -70} + + data, err := json.Marshal(pt.Source()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"lat":40,"lon":-70}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/get.go b/services/templeton/vendor/src/github.com/olivere/elastic/get.go new file mode 100644 index 000000000..eb2221755 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/get.go @@ -0,0 +1,271 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// GetService allows to get a typed JSON document from the index based +// on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// for details. +type GetService struct { + client *Client + pretty bool + index string + typ string + id string + routing string + preference string + fields []string + refresh *bool + realtime *bool + fsc *FetchSourceContext + version interface{} + versionType string + parent string + ignoreErrorsOnGeneratedFields *bool +} + +// NewGetService creates a new GetService. +func NewGetService(client *Client) *GetService { + return &GetService{ + client: client, + typ: "_all", + } +} + +/* +// String returns a string representation of the GetService request. +func (s *GetService) String() string { + return fmt.Sprintf("[%v][%v][%v]: routing [%v]", + s.index, + s.typ, + s.id, + s.routing) +} +*/ + +// Index is the name of the index. +func (s *GetService) Index(index string) *GetService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *GetService) Type(typ string) *GetService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *GetService) Id(id string) *GetService { + s.id = id + return s +} + +// Parent is the ID of the parent document. +func (s *GetService) Parent(parent string) *GetService { + s.parent = parent + return s +} + +// Routing is the specific routing value. +func (s *GetService) Routing(routing string) *GetService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *GetService) Preference(preference string) *GetService { + s.preference = preference + return s +} + +// Fields is a list of fields to return in the response. +func (s *GetService) Fields(fields ...string) *GetService { + if s.fields == nil { + s.fields = make([]string, 0) + } + s.fields = append(s.fields, fields...) + return s +} + +func (s *GetService) FetchSource(fetchSource bool) *GetService { + if s.fsc == nil { + s.fsc = NewFetchSourceContext(fetchSource) + } else { + s.fsc.SetFetchSource(fetchSource) + } + return s +} + +func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService { + s.fsc = fetchSourceContext + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *GetService) Refresh(refresh bool) *GetService { + s.refresh = &refresh + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *GetService) Realtime(realtime bool) *GetService { + s.realtime = &realtime + return s +} + +// VersionType is the specific version type. +func (s *GetService) VersionType(versionType string) *GetService { + s.versionType = versionType + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetService) Version(version interface{}) *GetService { + s.version = version + return s +} + +// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that +// are generated if the transaction log is accessed. 
+func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService { + s.ignoreErrorsOnGeneratedFields = &ignore + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *GetService) Pretty(pretty bool) *GetService { + s.pretty = pretty + return s +} + +// Validate checks if the operation is valid. +func (s *GetService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// buildURL builds the URL for the operation. +func (s *GetService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.ignoreErrorsOnGeneratedFields != nil { + params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields)) + } + if s.fsc != nil { + for k, values := range s.fsc.Query() { + params.Add(k, strings.Join(values, ",")) + } + } + return path, params, nil +} + +// Do executes the operation. +func (s *GetService) Do() (*GetResult, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(GetResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a get request. + +// GetResult is the outcome of GetService.Do. 
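+// A short decoding sketch (Tweet is a caller-defined type; the index, type,
+// and id are illustrative):
+//
+//     res, err := client.Get().Index("twitter").Type("tweet").Id("1").Do()
+//     if err == nil && res.Found {
+//         var t Tweet
+//         err = json.Unmarshal(*res.Source, &t)
+//     }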
+type GetResult struct { + Index string `json:"_index"` // index meta field + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // id meta field + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Timestamp int64 `json:"_timestamp"` // timestamp meta field + TTL int64 `json:"_ttl"` // ttl meta field + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Source *json.RawMessage `json:"_source,omitempty"` + Found bool `json:"found,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + //Error string `json:"error,omitempty"` // used only in MultiGet + // TODO double-check that MultiGet now returns details error information + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/get_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/get_template.go new file mode 100644 index 000000000..328d6e516 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/get_template.go @@ -0,0 +1,113 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// GetTemplateService reads a search template. +// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type GetTemplateService struct { + client *Client + pretty bool + id string + version interface{} + versionType string +} + +// NewGetTemplateService creates a new GetTemplateService. +func NewGetTemplateService(client *Client) *GetTemplateService { + return &GetTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *GetTemplateService) Id(id string) *GetTemplateService { + s.id = id + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetTemplateService) Version(version interface{}) *GetTemplateService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *GetTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *GetTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation and returns the template. 
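+// A usage sketch ("my-search-template" is illustrative):
+//
+//     resp, err := client.GetTemplate().Id("my-search-template").Do()
+//     if err == nil {
+//         fmt.Printf("template body: %q\n", resp.Template)
+//     }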
+func (s *GetTemplateService) Do() (*GetTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(GetTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type GetTemplateResponse struct { + Template string `json:"template"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/get_template_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/get_template_test.go new file mode 100644 index 000000000..00aea6899 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/get_template_test.go @@ -0,0 +1,51 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestGetPutDeleteTemplate(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // This is a search template, not an index template! + tmpl := `{ + "template": { + "query" : { "term" : { "{{my_field}}" : "{{my_value}}" } }, + "size" : "{{my_size}}" + }, + "params":{ + "my_field" : "user", + "my_value" : "olivere", + "my_size" : 5 + } +}` + putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if putres == nil { + t.Fatalf("expected response; got: %v", putres) + } + if !putres.Created { + t.Fatalf("expected template to be created; got: %v", putres.Created) + } + + // Always delete template + defer client.DeleteTemplate().Id("elastic-template").Do() + + // Get template + getres, err := client.GetTemplate().Id("elastic-template").Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if getres == nil { + t.Fatalf("expected response; got: %v", getres) + } + if getres.Template == "" { + t.Errorf("expected template %q; got: %q", tmpl, getres.Template) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/get_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/get_test.go new file mode 100644 index 000000000..25dbe7391 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/get_test.go @@ -0,0 +1,165 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGet(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + // Get document 1 + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source == nil { + t.Errorf("expected Source != nil; got %v", res.Source) + } + + // Get non existent document 99 + res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do() + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + if !IsNotFound(err) { + t.Errorf("expected NotFound error; got: %v", err) + } + if res != nil { + t.Errorf("expected no response; got: %v", res) + } +} + +func TestGetWithSourceFiltering(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + // Get document 1, without source + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source != nil { + t.Errorf("expected Source == nil; got %v", res.Source) + } + + // Get document 1, exclude Message field + fsc := NewFetchSourceContext(true).Exclude("message") + res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source == nil { + t.Errorf("expected Source != nil; got %v", res.Source) + } + var tw tweet + err = json.Unmarshal(*res.Source, &tw) + if err != nil { + t.Fatal(err) + } + if tw.User != "olivere" { + t.Errorf("expected user %q; got: %q", "olivere", tw.User) + } + if tw.Message != "" { + t.Errorf("expected message %q; got: %q", "", tw.Message) + } +} + +func TestGetWithFields(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + // Get document 1, specifying fields + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Fields("message").Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got: %v", res.Found) + } + + // We must NOT have the "user" field + _, ok := res.Fields["user"] + if ok { + t.Fatalf("expected no field %q in document", "user") + } + + // We must have the "message" field + messageField, ok := res.Fields["message"] + if !ok { + t.Fatalf("expected field %q in document", "message") + } + + // Depending on the version of elasticsearch the message field will be returned + // as a string or a slice of strings. This test works in both cases. 
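+ // For example, the same document may come back as either
+ //   "fields": {"message": "Welcome to Golang and Elasticsearch."}
+ // or
+ //   "fields": {"message": ["Welcome to Golang and Elasticsearch."]}
+ // and both shapes are handled below.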
+ + messageString, ok := messageField.(string) + if !ok { + messageArray, ok := messageField.([]interface{}) + if !ok { + t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField) + } else { + messageString, ok = messageArray[0].(string) + if !ok { + t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField) + } + } + } + + if messageString != tweet1.Message { + t.Errorf("expected message %q; got: %q", tweet1.Message, messageString) + } +} + +func TestGetValidate(t *testing.T) { + // Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name + client := setupTestClientAndCreateIndex(t) + + if _, err := client.Get().Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Index(testIndexName).Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Type("tweet").Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Id("1").Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Index(testIndexName).Type("tweet").Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Type("tweet").Id("1").Do(); err == nil { + t.Fatal("expected Get to fail") + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/highlight.go b/services/templeton/vendor/src/github.com/olivere/elastic/highlight.go new file mode 100644 index 000000000..44501a731 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/highlight.go @@ -0,0 +1,455 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Highlight allows highlighting search results on one or more fields. +// For details, see: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html +type Highlight struct { + fields []*HighlighterField + tagsSchema *string + highlightFilter *bool + fragmentSize *int + numOfFragments *int + preTags []string + postTags []string + order *string + encoder *string + requireFieldMatch *bool + boundaryMaxScan *int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + phraseLimit *int + options map[string]interface{} + forceSource *bool + useExplicitFieldOrder bool +} + +func NewHighlight() *Highlight { + hl := &Highlight{ + fields: make([]*HighlighterField, 0), + preTags: make([]string, 0), + postTags: make([]string, 0), + boundaryChars: make([]rune, 0), + options: make(map[string]interface{}), + } + return hl +} + +func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight { + hl.fields = append(hl.fields, fields...) 
+ return hl +} + +func (hl *Highlight) Field(name string) *Highlight { + field := NewHighlighterField(name) + hl.fields = append(hl.fields, field) + return hl +} + +func (hl *Highlight) TagsSchema(schemaName string) *Highlight { + hl.tagsSchema = &schemaName + return hl +} + +func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight { + hl.highlightFilter = &highlightFilter + return hl +} + +func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight { + hl.fragmentSize = &fragmentSize + return hl +} + +func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight { + hl.numOfFragments = &numOfFragments + return hl +} + +func (hl *Highlight) Encoder(encoder string) *Highlight { + hl.encoder = &encoder + return hl +} + +func (hl *Highlight) PreTags(preTags ...string) *Highlight { + hl.preTags = append(hl.preTags, preTags...) + return hl +} + +func (hl *Highlight) PostTags(postTags ...string) *Highlight { + hl.postTags = append(hl.postTags, postTags...) + return hl +} + +func (hl *Highlight) Order(order string) *Highlight { + hl.order = &order + return hl +} + +func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight { + hl.requireFieldMatch = &requireFieldMatch + return hl +} + +func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight { + hl.boundaryMaxScan = &boundaryMaxScan + return hl +} + +func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight { + hl.boundaryChars = append(hl.boundaryChars, boundaryChars...) + return hl +} + +func (hl *Highlight) HighlighterType(highlighterType string) *Highlight { + hl.highlighterType = &highlighterType + return hl +} + +func (hl *Highlight) Fragmenter(fragmenter string) *Highlight { + hl.fragmenter = &fragmenter + return hl +} + +func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight { + hl.highlightQuery = highlightQuery + return hl +} + +func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight { + hl.noMatchSize = &noMatchSize + return hl +} + +func (hl *Highlight) Options(options map[string]interface{}) *Highlight { + hl.options = options + return hl +} + +func (hl *Highlight) ForceSource(forceSource bool) *Highlight { + hl.forceSource = &forceSource + return hl +} + +func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight { + hl.useExplicitFieldOrder = useExplicitFieldOrder + return hl +} + +// Creates the query source for the bool query. +func (hl *Highlight) Source() (interface{}, error) { + // Returns the map inside of "highlight": + // "highlight":{ + // ... this ... 
+ // } + source := make(map[string]interface{}) + if hl.tagsSchema != nil { + source["tags_schema"] = *hl.tagsSchema + } + if hl.preTags != nil && len(hl.preTags) > 0 { + source["pre_tags"] = hl.preTags + } + if hl.postTags != nil && len(hl.postTags) > 0 { + source["post_tags"] = hl.postTags + } + if hl.order != nil { + source["order"] = *hl.order + } + if hl.highlightFilter != nil { + source["highlight_filter"] = *hl.highlightFilter + } + if hl.fragmentSize != nil { + source["fragment_size"] = *hl.fragmentSize + } + if hl.numOfFragments != nil { + source["number_of_fragments"] = *hl.numOfFragments + } + if hl.encoder != nil { + source["encoder"] = *hl.encoder + } + if hl.requireFieldMatch != nil { + source["require_field_match"] = *hl.requireFieldMatch + } + if hl.boundaryMaxScan != nil { + source["boundary_max_scan"] = *hl.boundaryMaxScan + } + if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 { + source["boundary_chars"] = hl.boundaryChars + } + if hl.highlighterType != nil { + source["type"] = *hl.highlighterType + } + if hl.fragmenter != nil { + source["fragmenter"] = *hl.fragmenter + } + if hl.highlightQuery != nil { + src, err := hl.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if hl.noMatchSize != nil { + source["no_match_size"] = *hl.noMatchSize + } + if hl.phraseLimit != nil { + source["phrase_limit"] = *hl.phraseLimit + } + if hl.options != nil && len(hl.options) > 0 { + source["options"] = hl.options + } + if hl.forceSource != nil { + source["force_source"] = *hl.forceSource + } + + if hl.fields != nil && len(hl.fields) > 0 { + if hl.useExplicitFieldOrder { + // Use a slice for the fields + fields := make([]map[string]interface{}, 0) + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fmap := make(map[string]interface{}) + fmap[field.Name] = src + fields = append(fields, fmap) + } + source["fields"] = fields + } else { + // Use a map for the fields + fields := make(map[string]interface{}, 0) + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fields[field.Name] = src + } + source["fields"] = fields + } + } + + return source, nil +} + +// HighlighterField specifies a highlighted field. 
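+// A construction sketch (field names follow the tests below):
+//
+//     field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
+//     hl := NewHighlight().Fields(field).UseExplicitFieldOrder(true)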
+type HighlighterField struct { + Name string + + preTags []string + postTags []string + fragmentSize int + fragmentOffset int + numOfFragments int + highlightFilter *bool + order *string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + matchedFields []string + phraseLimit *int + options map[string]interface{} + forceSource *bool + + /* + Name string + preTags []string + postTags []string + fragmentSize int + numOfFragments int + fragmentOffset int + highlightFilter *bool + order string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType string + fragmenter string + highlightQuery Query + noMatchSize *int + matchedFields []string + options map[string]interface{} + forceSource *bool + */ +} + +func NewHighlighterField(name string) *HighlighterField { + return &HighlighterField{ + Name: name, + preTags: make([]string, 0), + postTags: make([]string, 0), + fragmentSize: -1, + fragmentOffset: -1, + numOfFragments: -1, + boundaryMaxScan: -1, + boundaryChars: make([]rune, 0), + matchedFields: make([]string, 0), + options: make(map[string]interface{}), + } +} + +func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField { + f.preTags = append(f.preTags, preTags...) + return f +} + +func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField { + f.postTags = append(f.postTags, postTags...) + return f +} + +func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField { + f.fragmentSize = fragmentSize + return f +} + +func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField { + f.fragmentOffset = fragmentOffset + return f +} + +func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField { + f.numOfFragments = numOfFragments + return f +} + +func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField { + f.highlightFilter = &highlightFilter + return f +} + +func (f *HighlighterField) Order(order string) *HighlighterField { + f.order = &order + return f +} + +func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField { + f.requireFieldMatch = &requireFieldMatch + return f +} + +func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField { + f.boundaryMaxScan = boundaryMaxScan + return f +} + +func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField { + f.boundaryChars = append(f.boundaryChars, boundaryChars...) + return f +} + +func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField { + f.highlighterType = &highlighterType + return f +} + +func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField { + f.fragmenter = &fragmenter + return f +} + +func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField { + f.highlightQuery = highlightQuery + return f +} + +func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField { + f.noMatchSize = &noMatchSize + return f +} + +func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField { + f.options = options + return f +} + +func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField { + f.matchedFields = append(f.matchedFields, matchedFields...) 
+ return f +} + +func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField { + f.phraseLimit = &phraseLimit + return f +} + +func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField { + f.forceSource = &forceSource + return f +} + +func (f *HighlighterField) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if f.preTags != nil && len(f.preTags) > 0 { + source["pre_tags"] = f.preTags + } + if f.postTags != nil && len(f.postTags) > 0 { + source["post_tags"] = f.postTags + } + if f.fragmentSize != -1 { + source["fragment_size"] = f.fragmentSize + } + if f.numOfFragments != -1 { + source["number_of_fragments"] = f.numOfFragments + } + if f.fragmentOffset != -1 { + source["fragment_offset"] = f.fragmentOffset + } + if f.highlightFilter != nil { + source["highlight_filter"] = *f.highlightFilter + } + if f.order != nil { + source["order"] = *f.order + } + if f.requireFieldMatch != nil { + source["require_field_match"] = *f.requireFieldMatch + } + if f.boundaryMaxScan != -1 { + source["boundary_max_scan"] = f.boundaryMaxScan + } + if f.boundaryChars != nil && len(f.boundaryChars) > 0 { + source["boundary_chars"] = f.boundaryChars + } + if f.highlighterType != nil { + source["type"] = *f.highlighterType + } + if f.fragmenter != nil { + source["fragmenter"] = *f.fragmenter + } + if f.highlightQuery != nil { + src, err := f.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if f.noMatchSize != nil { + source["no_match_size"] = *f.noMatchSize + } + if f.matchedFields != nil && len(f.matchedFields) > 0 { + source["matched_fields"] = f.matchedFields + } + if f.phraseLimit != nil { + source["phrase_limit"] = *f.phraseLimit + } + if f.options != nil && len(f.options) > 0 { + source["options"] = f.options + } + if f.forceSource != nil { + source["force_source"] = *f.forceSource + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/highlight_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/highlight_test.go new file mode 100644 index 000000000..be5cd963e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/highlight_test.go @@ -0,0 +1,192 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + _ "net/http" + "testing" +) + +func TestHighlighterField(t *testing.T) { + field := NewHighlighterField("grade") + src, err := field.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlighterFieldWithOptions(t *testing.T) { + field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1) + src, err := field.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fragment_size":2,"number_of_fragments":1}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithStringField(t *testing.T) { + builder := NewHighlight().Field("grade") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithFields(t *testing.T) { + gradeField := NewHighlighterField("grade") + builder := NewHighlight().Fields(gradeField) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithMultipleFields(t *testing.T) { + gradeField := NewHighlighterField("grade") + colorField := NewHighlighterField("color") + builder := NewHighlight().Fields(gradeField, colorField) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"color":{},"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlighterWithExplicitFieldOrder(t *testing.T) { + gradeField := NewHighlighterField("grade").FragmentSize(2) + colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1) + builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithTermQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = 
client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Specify highlighter
+	hl := NewHighlight()
+	hl = hl.Fields(NewHighlighterField("message"))
+	hl = hl.PreTags("<em>").PostTags("</em>")
+
+	// The prefix query should match exactly one document
+	query := NewPrefixQuery("message", "golang")
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Highlight(hl).
+		Query(query).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Fatalf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 1 {
+		t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 1 {
+		t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+	}
+
+	hit := searchResult.Hits.Hits[0]
+	var tw tweet
+	if err := json.Unmarshal(*hit.Source, &tw); err != nil {
+		t.Fatal(err)
+	}
+	if hit.Highlight == nil || len(hit.Highlight) == 0 {
+		t.Fatal("expected hit to have a highlight; got nil")
+	}
+	if hl, found := hit.Highlight["message"]; found {
+		if len(hl) != 1 {
+			t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
+		}
+		expected := "Welcome to <em>Golang</em> and Elasticsearch."
+		if hl[0] != expected {
+			t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
+		}
+	} else {
+		t.Fatal("expected to have a highlight on field \"message\"; got none")
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/index.go b/services/templeton/vendor/src/github.com/olivere/elastic/index.go
new file mode 100644
index 000000000..bdaba0560
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/index.go
@@ -0,0 +1,284 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndexService adds or updates a typed JSON document in a specified index,
+// making it searchable.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
+// for details.
+type IndexService struct {
+	client      *Client
+	pretty      bool
+	id          string
+	index       string
+	typ         string
+	parent      string
+	replication string
+	routing     string
+	timeout     string
+	timestamp   string
+	ttl         string
+	version     interface{}
+	opType      string
+	versionType string
+	refresh     *bool
+	consistency string
+	bodyJson    interface{}
+	bodyString  string
+}
+
+// NewIndexService creates a new IndexService.
+func NewIndexService(client *Client) *IndexService {
+	return &IndexService{
+		client: client,
+	}
+}
+
+// Id is the document ID.
+func (s *IndexService) Id(id string) *IndexService {
+	s.id = id
+	return s
+}
+
+// Index is the name of the index.
+func (s *IndexService) Index(index string) *IndexService {
+	s.index = index
+	return s
+}
+
+// Type is the type of the document.
+func (s *IndexService) Type(typ string) *IndexService {
+	s.typ = typ
+	return s
+}
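+
+// Editorial note: the following usage sketch is illustrative and is not part
+// of the vendored upstream file. A typical call chains the setters and ends
+// with Do; the client value and the index, type, and id names here are
+// hypothetical:
+//
+//	doc := map[string]interface{}{"user": "olivere", "message": "hello"}
+//	res, err := client.Index().
+//		Index("tweets").
+//		Type("tweet").
+//		Id("1").
+//		BodyJson(doc).
+//		Do()
+//
+// On success, res.Created reports whether a new document was created rather
+// than an existing one updated (see IndexResponse below).
+
+// Consistency is an explicit write consistency setting for the operation.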
+func (s *IndexService) Consistency(consistency string) *IndexService { + s.consistency = consistency + return s +} + +// Refresh the index after performing the operation. +func (s *IndexService) Refresh(refresh bool) *IndexService { + s.refresh = &refresh + return s +} + +// Ttl is an expiration time for the document. +func (s *IndexService) Ttl(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// TTL is an expiration time for the document (alias for Ttl). +func (s *IndexService) TTL(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// Version is an explicit version number for concurrency control. +func (s *IndexService) Version(version interface{}) *IndexService { + s.version = version + return s +} + +// OpType is an explicit operation type, i.e. "create" or "index" (default). +func (s *IndexService) OpType(opType string) *IndexService { + s.opType = opType + return s +} + +// Parent is the ID of the parent document. +func (s *IndexService) Parent(parent string) *IndexService { + s.parent = parent + return s +} + +// Replication is a specific replication type. +func (s *IndexService) Replication(replication string) *IndexService { + s.replication = replication + return s +} + +// Routing is a specific routing value. +func (s *IndexService) Routing(routing string) *IndexService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndexService) Timeout(timeout string) *IndexService { + s.timeout = timeout + return s +} + +// Timestamp is an explicit timestamp for the document. +func (s *IndexService) Timestamp(timestamp string) *IndexService { + s.timestamp = timestamp + return s +} + +// VersionType is a specific version type. +func (s *IndexService) VersionType(versionType string) *IndexService { + s.versionType = versionType + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndexService) Pretty(pretty bool) *IndexService { + s.pretty = pretty + return s +} + +// BodyJson is the document as a serializable JSON interface. +func (s *IndexService) BodyJson(body interface{}) *IndexService { + s.bodyJson = body + return s +} + +// BodyString is the document encoded as a string. +func (s *IndexService) BodyString(body string) *IndexService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndexService) buildURL() (string, string, url.Values, error) { + var err error + var method, path string + + if s.id != "" { + // Create document with manual id + method = "PUT" + path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + } else { + // Automatic ID generation + // See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation + method = "POST" + path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } + if err != nil { + return "", "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.replication != "" { + params.Set("replication", s.replication) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.timestamp != "" { + params.Set("timestamp", s.timestamp) + } + if s.ttl != "" { + params.Set("ttl", s.ttl) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return method, path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndexService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndexService) Do() (*IndexResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + method, path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(method, path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndexResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndexResponse is the result of indexing a document in Elasticsearch. +type IndexResponse struct { + // TODO _shards { total, failed, successful } + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/index_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/index_test.go new file mode 100644 index 000000000..01722b3e3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/index_test.go @@ -0,0 +1,279 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestIndexLifecycle(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// Add a document
+	indexResult, err := client.Index().
+		Index(testIndexName).
+		Type("tweet").
+		Id("1").
+		BodyJson(&tweet1).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indexResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", indexResult)
+	}
+
+	// Exists
+	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("expected exists %v; got %v", true, exists)
+	}
+
+	// Get document
+	getResult, err := client.Get().
+		Index(testIndexName).
+		Type("tweet").
+		Id("1").
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if getResult.Index != testIndexName {
+		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+	}
+	if getResult.Type != "tweet" {
+		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+	}
+	if getResult.Id != "1" {
+		t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
+	}
+	if getResult.Source == nil {
+		t.Errorf("expected GetResult.Source to be != nil; got nil")
+	}
+
+	// Decode the Source field
+	var tweetGot tweet
+	err = json.Unmarshal(*getResult.Source, &tweetGot)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tweetGot.User != tweet1.User {
+		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+	}
+	if tweetGot.Message != tweet1.Message {
+		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+	}
+
+	// Delete document again
+	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if deleteResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", deleteResult)
+	}
+
+	// Exists
+	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("expected exists %v; got %v", false, exists)
+	}
+}
+
+func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// Add a document
+	indexResult, err := client.Index().
+		Index(testIndexName).
+		Type("tweet").
+		BodyJson(&tweet1).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indexResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", indexResult)
+	}
+	if indexResult.Id == "" {
+		t.Fatalf("expected Elasticsearch to generate an automatic ID, got: %v", indexResult.Id)
+	}
+	id := indexResult.Id
+
+	// Exists
+	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("expected exists %v; got %v", true, exists)
+	}
+
+	// Get document
+	getResult, err := client.Get().
+		Index(testIndexName).
+		Type("tweet").
+		Id(id).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if getResult.Index != testIndexName {
+		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+	}
+	if getResult.Type != "tweet" {
+		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+	}
+	if getResult.Id != id {
+		t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
+	}
+	if getResult.Source == nil {
+		t.Errorf("expected GetResult.Source to be != nil; got nil")
+	}
+
+	// Decode the Source field
+	var tweetGot tweet
+	err = json.Unmarshal(*getResult.Source, &tweetGot)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tweetGot.User != tweet1.User {
+		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+	}
+	if tweetGot.Message != tweet1.Message {
+		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+	}
+
+	// Delete document again
+	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if deleteResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", deleteResult)
+	}
+
+	// Exists
+	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("expected exists %v; got %v", false, exists)
+	}
+}
+
+func TestIndexValidate(t *testing.T) {
+	client := setupTestClient(t)
+
+	tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// No index name -> fail with error
+	res, err := NewIndexService(client).Type("tweet").Id("1").BodyJson(&tweet).Do()
+	if err == nil {
+		t.Fatalf("expected Index to fail without index name")
+	}
+	if res != nil {
+		t.Fatalf("expected result to be == nil; got: %v", res)
+	}
+
+	// No type -> fail with error
+	res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do()
+	if err == nil {
+		t.Fatalf("expected Index to fail without type")
+	}
+	if res != nil {
+		t.Fatalf("expected result to be == nil; got: %v", res)
+	}
+}
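+
+// Editorial note: the following sketch is illustrative and is not part of the
+// vendored upstream file. Validate (defined in index.go) collects the names
+// of all missing required fields before any HTTP request is made, so a bare
+//
+//	_, err := NewIndexService(client).Do()
+//
+// fails fast with an error of the form
+//
+//	missing required fields: [Index Type BodyJson]
+
+func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
+	// TODO: Find out how to make these test robust
+	t.Skip("test fails regularly with 409 (Conflict): " +
+		"IndexPrimaryShardNotAllocatedException[[elastic-test] " +
+		"primary not allocated post api... 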
skipping") + + client := setupTestClient(t) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Fatalf("expected response; got: %v", createIndex) + } + if !createIndex.Acknowledged { + t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged) + } + + // Exists + indexExists, err := client.IndexExists(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !indexExists { + t.Fatalf("expected index exists=%v; got %v", true, indexExists) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Close index + closeIndex, err := client.CloseIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if closeIndex == nil { + t.Fatalf("expected response; got: %v", closeIndex) + } + if !closeIndex.Acknowledged { + t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged) + } + + // Open index + openIndex, err := client.OpenIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if openIndex == nil { + t.Fatalf("expected response; got: %v", openIndex) + } + if !openIndex.Acknowledged { + t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if deleteIndex == nil { + t.Fatalf("expected response; got: %v", deleteIndex) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_close.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_close.go new file mode 100644 index 000000000..ad344cb26 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_close.go @@ -0,0 +1,153 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesCloseService closes an index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html +// for details. +type IndicesCloseService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesCloseService creates and initializes a new IndicesCloseService. +func NewIndicesCloseService(client *Client) *IndicesCloseService { + return &IndicesCloseService{client: client} +} + +// Index is the name of the index to close. +func (s *IndicesCloseService) Index(index string) *IndicesCloseService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. 
+func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). +func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesCloseService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_close", map[string]string{ + "index": s.index, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesCloseService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesCloseService) Do() (*IndicesCloseResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesCloseResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesCloseResponse is the response of IndicesCloseService.Do. +type IndicesCloseResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_close_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_close_test.go new file mode 100644 index 000000000..7293bb1c4 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_close_test.go @@ -0,0 +1,81 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +// TODO(oe): Find out why this test fails on Travis CI. +/* +func TestIndicesOpenAndClose(t *testing.T) { + client := setupTestClient(t) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !createIndex.Acknowledged { + t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) + } + defer func() { + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) + } + }() + + waitForYellow := func() { + // Wait for status yellow + res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do() + if err != nil { + t.Fatal(err) + } + if res != nil && res.TimedOut { + t.Fatalf("cluster time out waiting for status %q", "yellow") + } + } + + // Wait for cluster + waitForYellow() + + // Close index + cresp, err := client.CloseIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !cresp.Acknowledged { + t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName) + } + + // Wait for cluster + waitForYellow() + + // Open index again + oresp, err := client.OpenIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !oresp.Acknowledged { + t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName) + } +} +*/ + +func TestIndicesCloseValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesCloseService(client).Do() + if err == nil { + t.Fatalf("expected IndicesClose to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_create.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_create.go new file mode 100644 index 000000000..1e98447ea --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_create.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "errors" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesCreateService creates a new index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html +// for details. +type IndicesCreateService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + bodyJson interface{} + bodyString string +} + +// NewIndicesCreateService returns a new IndicesCreateService. +func NewIndicesCreateService(client *Client) *IndicesCreateService { + return &IndicesCreateService{client: client} +} + +// Index is the name of the index to create. +func (b *IndicesCreateService) Index(index string) *IndicesCreateService { + b.index = index + return b +} + +// Timeout the explicit operation timeout, e.g. "5s". +func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. 
+func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Body specifies the configuration of the index as a string.
+// It is an alias for BodyString.
+func (b *IndicesCreateService) Body(body string) *IndicesCreateService {
+	b.bodyString = body
+	return b
+}
+
+// BodyString specifies the configuration of the index as a string.
+func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService {
+	b.bodyString = body
+	return b
+}
+
+// BodyJson specifies the configuration of the index. The interface{} will
+// be serialized as a JSON document, so use a map[string]interface{}.
+func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
+	b.bodyJson = body
+	return b
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
+	b.pretty = pretty
+	return b
+}
+
+// Do executes the operation.
+func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) {
+	if b.index == "" {
+		return nil, errors.New("missing index name")
+	}
+
+	// Build URL
+	path, err := uritemplates.Expand("/{index}", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	params := make(url.Values)
+	if b.pretty {
+		params.Set("pretty", "1")
+	}
+	if b.masterTimeout != "" {
+		params.Set("master_timeout", b.masterTimeout)
+	}
+	if b.timeout != "" {
+		params.Set("timeout", b.timeout)
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if b.bodyJson != nil {
+		body = b.bodyJson
+	} else {
+		body = b.bodyString
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	ret := new(IndicesCreateResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a create index request.
+
+// IndicesCreateResult is the outcome of creating a new index.
+type IndicesCreateResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}
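+
+// Editorial note: the following usage sketch is illustrative and is not part
+// of the vendored upstream file. It creates an index with hypothetical
+// settings via BodyJson; the client value and index name are assumptions:
+//
+//	body := map[string]interface{}{
+//		"settings": map[string]interface{}{
+//			"number_of_shards":   1,
+//			"number_of_replicas": 0,
+//		},
+//	}
+//	res, err := client.CreateIndex("elastic-test").BodyJson(body).Do()
+//	if err == nil && res.Acknowledged {
+//		// the index was created
+//	}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_create_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_create_test.go
new file mode 100644
index 000000000..b3723950a
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_create_test.go
@@ -0,0 +1,60 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.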
+ +package elastic + +import "testing" + +func TestIndicesLifecycle(t *testing.T) { + client := setupTestClient(t) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !createIndex.Acknowledged { + t.Errorf("expected IndicesCreateResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) + } + + // Check if index exists + indexExists, err := client.IndexExists(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !indexExists { + t.Fatalf("index %s should exist, but doesn't\n", testIndexName) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) + } + + // Check if index exists + indexExists, err = client.IndexExists(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if indexExists { + t.Fatalf("index %s should not exist, but does\n", testIndexName) + } +} + +func TestIndicesCreateValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesCreateService(client).Body(testMapping).Do() + if err == nil { + t.Fatalf("expected IndicesCreate to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete.go new file mode 100644 index 000000000..e2582dc6f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteService allows to delete existing indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html +// for details. +type IndicesDeleteService struct { + client *Client + pretty bool + index []string + timeout string + masterTimeout string +} + +// NewIndicesDeleteService creates and initializes a new IndicesDeleteService. +func NewIndicesDeleteService(client *Client) *IndicesDeleteService { + return &IndicesDeleteService{ + client: client, + index: make([]string, 0), + } +} + +// Index adds the list of indices to delete. +// Use `_all` or `*` string to delete all indices. +func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesDeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteService) Do() (*IndicesDeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete index request. + +// IndicesDeleteResponse is the response of IndicesDeleteService.Do. +type IndicesDeleteResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_template.go new file mode 100644 index 000000000..2c62a06cd --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_template.go @@ -0,0 +1,122 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteTemplateService deletes index templates. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. +type IndicesDeleteTemplateService struct { + client *Client + pretty bool + name string + timeout string + masterTimeout string +} + +// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService. +func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService { + return &IndicesDeleteTemplateService{ + client: client, + } +} + +// Name is the name of the template. +func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do. +type IndicesDeleteTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_test.go new file mode 100644 index 000000000..d84edb8de --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_test.go @@ -0,0 +1,20 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIndicesDeleteValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesDeleteService(client).Do() + if err == nil { + t.Fatalf("expected IndicesDelete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer.go new file mode 100644 index 000000000..79aa4c2d5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteWarmerService allows to delete a warmer. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. 
+type IndicesDeleteWarmerService struct { + client *Client + pretty bool + index []string + name []string + masterTimeout string +} + +// NewIndicesDeleteWarmerService creates a new IndicesDeleteWarmerService. +func NewIndicesDeleteWarmerService(client *Client) *IndicesDeleteWarmerService { + return &IndicesDeleteWarmerService{ + client: client, + index: make([]string, 0), + name: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesDeleteWarmerService) Index(indices ...string) *IndicesDeleteWarmerService { + s.index = append(s.index, indices...) + return s +} + +// Name is a list of warmer names to delete (supports wildcards); +// use `_all` to delete all warmers in the specified indices. +func (s *IndicesDeleteWarmerService) Name(name ...string) *IndicesDeleteWarmerService { + s.name = append(s.name, name...) + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteWarmerService) MasterTimeout(masterTimeout string) *IndicesDeleteWarmerService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteWarmerService) Pretty(pretty bool) *IndicesDeleteWarmerService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesDeleteWarmerService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "name": strings.Join(s.name, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if len(s.name) > 0 { + params.Set("name", strings.Join(s.name, ",")) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteWarmerService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(s.name) == 0 { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteWarmerService) Do() (*DeleteWarmerResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteWarmerResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// DeleteWarmerResponse is the response of IndicesDeleteWarmerService.Do. 
+type DeleteWarmerResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer_test.go new file mode 100644 index 000000000..3d811ea59 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer_test.go @@ -0,0 +1,48 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestDeleteWarmerBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Names []string + Expected string + }{ + { + []string{"test"}, + []string{"warmer_1"}, + "/test/_warmer/warmer_1", + }, + { + []string{"*"}, + []string{"warmer_1"}, + "/%2A/_warmer/warmer_1", + }, + { + []string{"_all"}, + []string{"warmer_1"}, + "/_all/_warmer/warmer_1", + }, + { + []string{"index-1", "index-2"}, + []string{"warmer_1", "warmer_2"}, + "/index-1%2Cindex-2/_warmer/warmer_1%2Cwarmer_2", + }, + } + + for _, test := range tests { + path, _, err := client.DeleteWarmer().Index(test.Indices...).Name(test.Names...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists.go new file mode 100644 index 000000000..92f9974f2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists.go @@ -0,0 +1,149 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsService checks if an index or indices exist or not. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html +// for details. +type IndicesExistsService struct { + client *Client + pretty bool + index []string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + local *bool +} + +// NewIndicesExistsService creates and initializes a new IndicesExistsService. +func NewIndicesExistsService(client *Client) *IndicesExistsService { + return &IndicesExistsService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of one or more indices to check. +func (s *IndicesExistsService) Index(index []string) *IndicesExistsService { + s.index = index + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. (This includes `_all` string or +// when no indices have been specified). +func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. 
+func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService { + s.expandWildcards = expandWildcards + return s +} + +// Local, when set, returns local information and does not retrieve the state +// from master node (default: false). +func (s *IndicesExistsService) Local(local bool) *IndicesExistsService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesExistsService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template.go new file mode 100644 index 000000000..7587a8786 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template.go @@ -0,0 +1,112 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsTemplateService checks if a given template exists. +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists +// for documentation. 
+type IndicesExistsTemplateService struct { + client *Client + pretty bool + name string + local *bool +} + +// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService. +func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService { + return &IndicesExistsTemplateService{ + client: client, + } +} + +// Name is the name of the template. +func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService { + s.name = name + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesExistsTemplateService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template_test.go new file mode 100644 index 000000000..32fb82ad3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestIndexExistsTemplate(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tmpl := `{ + "template":"elastic-test*", + "settings":{ + "number_of_shards":1, + "number_of_replicas":0 + }, + "mappings":{ + "tweet":{ + "properties":{ + "tags":{ + "type":"string" + }, + "location":{ + "type":"geo_point" + }, + "suggest_field":{ + "type":"completion", + "payloads":true + } + } + } + } +}` + putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if putres == nil { + t.Fatalf("expected response; got: %v", putres) + } + if !putres.Acknowledged { + t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged) + } + + // Always delete template + defer client.IndexDeleteTemplate("elastic-template").Do() + + // Check if template exists + exists, err := client.IndexTemplateExists("elastic-template").Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if !exists { + t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists) + } + + // Get template + getres, err := client.IndexGetTemplate("elastic-template").Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if getres == nil { + t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_test.go new file mode 100644 index 000000000..8cb6f5fab --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_test.go @@ -0,0 +1,20 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIndicesExistsWithoutIndex(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesExistsService(client).Do() + if err == nil { + t.Fatalf("expected IndicesExists to fail without index name") + } + if res != false { + t.Fatalf("expected result to be false; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type.go new file mode 100644 index 000000000..631f773fe --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type.go @@ -0,0 +1,161 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsTypeService checks if one or more types exist in one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html +// for details. +type IndicesExistsTypeService struct { + client *Client + pretty bool + typ []string + index []string + expandWildcards string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool +} + +// NewIndicesExistsTypeService creates a new IndicesExistsTypeService. 
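+// The service is normally reached through client.TypeExists(), as in this
+// minimal sketch (index and type names are placeholders):
+//
+//	exists, err := client.TypeExists().Index("twitter").Type("tweet").Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = exists // true if the type exists in the index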
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService { + return &IndicesExistsTypeService{ + client: client, + index: make([]string, 0), + typ: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` to check the types across all indices. +func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types to check. +func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService { + s.typ = append(s.typ, types...) + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService { + s.expandWildcards = expandWildcards + return s +} + +// Local specifies whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTypeService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(s.typ) == 0 { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
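+// Do returns (true, nil) when Elasticsearch answers with HTTP 200,
+// (false, nil) on a plain 404, and a non-nil error for any other status.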
+func (s *IndicesExistsTypeService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type_test.go new file mode 100644 index 000000000..51721b125 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type_test.go @@ -0,0 +1,134 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestIndicesExistsTypeBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Expected string + ExpectValidateFailure bool + }{ + { + []string{}, + []string{}, + "", + true, + }, + { + []string{"index1"}, + []string{}, + "", + true, + }, + { + []string{}, + []string{"type1"}, + "", + true, + }, + { + []string{"index1"}, + []string{"type1"}, + "/index1/type1", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1"}, + "/index1%2Cindex2/type1", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1", "type2"}, + "/index1%2Cindex2/type1%2Ctype2", + false, + }, + } + + for i, test := range tests { + err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate() + if err == nil && test.ExpectValidateFailure { + t.Errorf("case #%d: expected validate to fail", i+1) + continue + } + if err != nil && !test.ExpectValidateFailure { + t.Errorf("case #%d: expected validate to succeed", i+1) + continue + } + if !test.ExpectValidateFailure { + path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL() + if err != nil { + t.Fatalf("case #%d: %v", i+1, err) + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } + } +} + +func TestIndicesExistsType(t *testing.T) { + client := setupTestClient(t) + + // Create index with tweet type + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex) + } + if !createIndex.Acknowledged { + t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) + } + + // Check if type exists + exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do() + if err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) + } + + // 
Check if type exists
+ exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ if exists {
+  t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName)
+ }
+}
+
+func TestIndicesExistsTypeValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index name -> fail with error
+ res, err := NewIndicesExistsTypeService(client).Do()
+ if err == nil {
+  t.Fatalf("expected IndicesExistsType to fail without index name")
+ }
+ if res != false {
+  t.Fatalf("expected result to be false; got: %v", res)
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush.go
new file mode 100644
index 000000000..3d101f9bd
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush.go
@@ -0,0 +1,169 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesFlushService allows one to flush one or more indices. The flush
+// process of an index frees memory from the index by flushing data to the
+// index storage and clearing the internal transaction log.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// for details.
+type IndicesFlushService struct {
+ client *Client
+ pretty bool
+ index []string
+ force *bool
+ waitIfOngoing *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewIndicesFlushService creates a new IndicesFlushService.
+func NewIndicesFlushService(client *Client) *IndicesFlushService {
+ return &IndicesFlushService{
+  client: client,
+  index: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string for all indices.
+func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Force indicates whether a flush should be forced even if it is not
+// necessarily needed, i.e. if no changes will be committed to the index.
+// This is useful if transaction log IDs should be incremented even if
+// no uncommitted changes are present. (This setting can be considered internal.)
+func (s *IndicesFlushService) Force(force bool) *IndicesFlushService {
+ s.force = &force
+ return s
+}
+
+// WaitIfOngoing, if set to true, indicates that the flush operation will
+// block until the flush can be executed if another flush operation is
+// already executing. The default is false and will cause an exception
+// to be thrown on the shard level if another flush operation is already running.
+func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService {
+ s.waitIfOngoing = &waitIfOngoing
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices. (This includes `_all` string or when
+// no indices have been specified).
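+//
+// A minimal sketch (assuming a configured *Client named "client"; the
+// index pattern is a placeholder):
+//
+//	res, err := client.Flush().Index("logs-*").AllowNoIndices(true).Do()
+//
+// would not fail just because no index matches "logs-*".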
+func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards specifies whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesFlushService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+
+ if len(s.index) > 0 {
+  path, err = uritemplates.Expand("/{index}/_flush", map[string]string{
+   "index": strings.Join(s.index, ","),
+  })
+ } else {
+  path = "/_flush"
+ }
+ if err != nil {
+  return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+  params.Set("pretty", "1")
+ }
+ if s.force != nil {
+  params.Set("force", fmt.Sprintf("%v", *s.force))
+ }
+ if s.waitIfOngoing != nil {
+  params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
+ }
+ if s.ignoreUnavailable != nil {
+  params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+  params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+  params.Set("expand_wildcards", s.expandWildcards)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesFlushService) Validate() error {
+ return nil
+}
+
+// Do executes the service.
+func (s *IndicesFlushService) Do() (*IndicesFlushResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+  return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+  return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+  return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesFlushResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+  return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a flush request.
+
+// IndicesFlushResponse is the response of IndicesFlushService.Do.
+type IndicesFlushResponse struct {
+ Shards shardsInfo `json:"_shards"`
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush_test.go
new file mode 100644
index 000000000..4e30a000b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush_test.go
@@ -0,0 +1,69 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestFlush(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Flush all indices
+ res, err := client.Flush().Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ if res == nil {
+  t.Errorf("expected res to be != nil; got: %v", res)
+ }
+}
+
+func TestFlushBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+  Indices []string
+  Expected string
+  ExpectValidateFailure bool
+ }{
+  {
+   []string{},
+   "/_flush",
+   false,
+  },
+  {
+   []string{"index1"},
+   "/index1/_flush",
+   false,
+  },
+  {
+   []string{"index1", "index2"},
+   "/index1%2Cindex2/_flush",
+   false,
+  },
+ }
+
+ for i, test := range tests {
+  err := NewIndicesFlushService(client).Index(test.Indices...).Validate()
+  if err == nil && test.ExpectValidateFailure {
+   t.Errorf("case #%d: expected validate to fail", i+1)
+   continue
+  }
+  if err != nil && !test.ExpectValidateFailure {
+   t.Errorf("case #%d: expected validate to succeed", i+1)
+   continue
+  }
+  if !test.ExpectValidateFailure {
+   path, _, err := NewIndicesFlushService(client).Index(test.Indices...).buildURL()
+   if err != nil {
+    t.Fatalf("case #%d: %v", i+1, err)
+   }
+   if path != test.Expected {
+    t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+   }
+  }
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge.go
new file mode 100644
index 000000000..6ca7b5b8f
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge.go
@@ -0,0 +1,200 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesForcemergeService allows one to force merging of one or more indices.
+// The merge relates to the number of segments a Lucene index holds
+// within each shard. The force merge operation allows reducing the number
+// of segments by merging them.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/2.1/indices-forcemerge.html
+// for more information.
+type IndicesForcemergeService struct {
+ client *Client
+ pretty bool
+ index []string
+ allowNoIndices *bool
+ expandWildcards string
+ flush *bool
+ ignoreUnavailable *bool
+ maxNumSegments interface{}
+ onlyExpungeDeletes *bool
+ operationThreading interface{}
+ waitForMerge *bool
+}
+
+// NewIndicesForcemergeService creates a new IndicesForcemergeService.
+func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService {
+ return &IndicesForcemergeService{
+  client: client,
+  index: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService {
+ if s.index == nil {
+  s.index = make([]string, 0)
+ }
+ s.index = append(s.index, index...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
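+//
+// AllowNoIndices is usually combined with a wildcard pattern. A minimal
+// sketch (assuming a configured *Client named "client"; the index pattern
+// is a placeholder):
+//
+//	res, err := client.Forcemerge("logs-*").AllowNoIndices(true).MaxNumSegments(1).Do()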
+func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Flush specifies whether the index should be flushed after performing
+// the operation (default: true).
+func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService {
+ s.flush = &flush
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// MaxNumSegments specifies the number of segments the index should be
+// merged into (default: dynamic).
+func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService {
+ s.maxNumSegments = maxNumSegments
+ return s
+}
+
+// OnlyExpungeDeletes specifies whether the operation should only expunge
+// deleted documents.
+func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService {
+ s.onlyExpungeDeletes = &onlyExpungeDeletes
+ return s
+}
+
+// OperationThreading is passed through as the operation_threading query
+// string parameter; it is left undocumented upstream.
+func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService {
+ s.operationThreading = operationThreading
+ return s
+}
+
+// WaitForMerge specifies whether the request should block until the
+// merge process is finished (default: true).
+func (s *IndicesForcemergeService) WaitForMerge(waitForMerge bool) *IndicesForcemergeService {
+ s.waitForMerge = &waitForMerge
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ // Build URL
+ if len(s.index) > 0 {
+  path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{
+   "index": strings.Join(s.index, ","),
+  })
+ } else {
+  path = "/_forcemerge"
+ }
+ if err != nil {
+  return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+  params.Set("pretty", "1")
+ }
+ if s.allowNoIndices != nil {
+  params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+  params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flush != nil {
+  params.Set("flush", fmt.Sprintf("%v", *s.flush))
+ }
+ if s.ignoreUnavailable != nil {
+  params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.maxNumSegments != nil {
+  params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments))
+ }
+ if s.onlyExpungeDeletes != nil {
+  params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
+ }
+ if s.operationThreading != nil {
+  params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
+ }
+ if s.waitForMerge != nil {
+  params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesForcemergeService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesForcemergeService) Do() (*IndicesForcemergeResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesForcemergeResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do. +type IndicesForcemergeResponse struct { + Shards shardsInfo `json:"_shards"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge_test.go new file mode 100644 index 000000000..c620654cc --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge_test.go @@ -0,0 +1,56 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestIndicesForcemergeBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Expected string + }{ + { + []string{}, + "/_forcemerge", + }, + { + []string{"index1"}, + "/index1/_forcemerge", + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_forcemerge", + }, + } + + for i, test := range tests { + path, _, err := client.Forcemerge().Index(test.Indices...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestIndicesForcemerge(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + _, err := client.Forcemerge(testIndexName).MaxNumSegments(1).WaitForMerge(true).Do() + if err != nil { + t.Fatal(err) + } + /* + if !ok { + t.Fatalf("expected forcemerge to succeed; got: %v", ok) + } + */ +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get.go new file mode 100644 index 000000000..355184394 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get.go @@ -0,0 +1,202 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetService retrieves information about one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html +// for more details. +type IndicesGetService struct { + client *Client + pretty bool + index []string + feature []string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + flatSettings *bool + human *bool +} + +// NewIndicesGetService creates a new IndicesGetService. 
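+// The service is normally reached through client.IndexGet() (see the
+// accompanying tests). A minimal sketch, with a placeholder index name:
+//
+//	res, err := client.IndexGet().Index("twitter").Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	info := res["twitter"] // aliases, mappings, settings, warmers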
+func NewIndicesGetService(client *Client) *IndicesGetService {
+ return &IndicesGetService{
+  client: client,
+  index: make([]string, 0),
+  feature: make([]string, 0),
+ }
+}
+
+// Index is a list of index names.
+func (s *IndicesGetService) Index(indices ...string) *IndicesGetService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Feature is a list of features to return, such as "_settings" or
+// "_mappings"; if omitted, the whole index definition is returned.
+func (s *IndicesGetService) Feature(features ...string) *IndicesGetService {
+ s.feature = append(s.feature, features...)
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetService) Local(local bool) *IndicesGetService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
+func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard expression
+// resolves to no concrete indices (default: false).
+func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether wildcard expressions should get
+// expanded to open or closed indices (default: open).
+func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+/* Disabled because serialization would fail in that case. */
+/*
+// FlatSettings make the service return settings in flat format (default: false).
+func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService {
+ s.flatSettings = &flatSettings
+ return s
+}
+*/
+
+// Human indicates whether to return version and creation date values
+// in human-readable format (default: false).
+func (s *IndicesGetService) Human(human bool) *IndicesGetService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
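+// For example, Index("twitter") combined with Feature("_settings") expands
+// to "/twitter/_settings", and an empty index list falls back to "_all"
+// (see the URL test cases in indices_get_test.go).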
+func (s *IndicesGetService) buildURL() (string, url.Values, error) { + var err error + var path string + var index []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.feature) > 0 { + // Build URL + path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{ + "index": strings.Join(index, ","), + "feature": strings.Join(s.feature, ","), + }) + } else { + // Build URL + path, err = uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetResponse + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetResponse is part of the response of IndicesGetService.Do. +type IndicesGetResponse struct { + Aliases map[string]interface{} `json:"aliases"` + Mappings map[string]interface{} `json:"mappings"` + Settings map[string]interface{} `json:"settings"` + Warmers map[string]interface{} `json:"warmers"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases.go new file mode 100644 index 000000000..4de88c63d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases.go @@ -0,0 +1,155 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// AliasesService retrieves the aliases configured for one or more indices.
+type AliasesService struct {
+ client *Client
+ indices []string
+ pretty bool
+}
+
+// NewAliasesService creates a new AliasesService.
+func NewAliasesService(client *Client) *AliasesService {
+ builder := &AliasesService{
+  client: client,
+  indices: make([]string, 0),
+ }
+ return builder
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *AliasesService) Pretty(pretty bool) *AliasesService {
+ s.pretty = pretty
+ return s
+}
+
+// Index is a list of index names to return aliases for.
+func (s *AliasesService) Index(indices ...string) *AliasesService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Do executes the operation.
+func (s *AliasesService) Do() (*AliasesResult, error) {
+ var err error
+
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+  index, err = uritemplates.Expand("{index}", map[string]string{
+   "index": index,
+  })
+  if err != nil {
+   return nil, err
+  }
+  indexPart = append(indexPart, index)
+ }
+ path += strings.Join(indexPart, ",")
+
+ // TODO Add types here
+
+ // Search
+ path += "/_aliases"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+  params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+  return nil, err
+ }
+
+ // The response has the following shape:
+ // {
+ //   "indexName" : {
+ //     "aliases" : {
+ //       "alias1" : { },
+ //       "alias2" : { }
+ //     }
+ //   },
+ //   "indexName2" : {
+ //     ...
+ //   },
+ // }
+ indexMap := make(map[string]interface{})
+ if err := json.Unmarshal(res.Body, &indexMap); err != nil {
+  return nil, err
+ }
+
+ // Each (indexName, _)
+ ret := &AliasesResult{
+  Indices: make(map[string]indexResult),
+ }
+ for indexName, indexData := range indexMap {
+  indexOut, found := ret.Indices[indexName]
+  if !found {
+   indexOut = indexResult{Aliases: make([]aliasResult, 0)}
+  }
+
+  // { "aliases" : { ... } }
+  indexDataMap, ok := indexData.(map[string]interface{})
+  if ok {
+   aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
+   if ok {
+    for aliasName := range aliasesData {
+     aliasRes := aliasResult{AliasName: aliasName}
+     indexOut.Aliases = append(indexOut.Aliases, aliasRes)
+    }
+   }
+  }
+
+  ret.Indices[indexName] = indexOut
+ }
+
+ return ret, nil
+}
+
+// -- Result of an alias request.
+
+// AliasesResult is the outcome of AliasesService.Do, keyed by index name.
+type AliasesResult struct {
+ Indices map[string]indexResult
+}
+
+type indexResult struct {
+ Aliases []aliasResult
+}
+
+type aliasResult struct {
+ AliasName string
+}
+
+// IndicesByAlias returns the names of all indices that carry the given alias.
+func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
+ indices := make([]string, 0)
+
+ for indexName, indexInfo := range ar.Indices {
+  for _, aliasInfo := range indexInfo.Aliases {
+   if aliasInfo.AliasName == aliasName {
+    indices = append(indices, indexName)
+   }
+  }
+ }
+
+ return indices
+}
+
+// HasAlias reports whether the index has the given alias.
+func (ir indexResult) HasAlias(aliasName string) bool {
+ for _, alias := range ir.Aliases {
+  if alias.AliasName == aliasName {
+   return true
+  }
+ }
+ return false
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases_test.go
new file mode 100644
index 000000000..6094f426e
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestAliases(t *testing.T) {
+ var err error
+
+ client := setupTestClientAndCreateIndex(t)
+
+ // Some tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+ // Add tweets to first index
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ // Add tweets to second index
+ _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName2).Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+
+ // Alias should not yet exist
+ aliasesResult1, err := client.Aliases().
+  Index(testIndexName, testIndexName2).
+  //Pretty(true).
+  Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ if len(aliasesResult1.Indices) != 2 {
+  t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult1.Indices {
+  if len(indexDetails.Aliases) != 0 {
+   t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+  }
+ }
+
+ // Add both indices to a new alias
+ aliasCreate, err := client.Alias().
+  Add(testIndexName, testAliasName).
+  Add(testIndexName2, testAliasName).
+  //Pretty(true).
+  Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ if !aliasCreate.Acknowledged {
+  t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+ }
+
+ // Alias should now exist
+ aliasesResult2, err := client.Aliases().
+  Index(testIndexName, testIndexName2).
+  //Pretty(true).
+  Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ if len(aliasesResult2.Indices) != 2 {
+  t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult2.Indices {
+  if len(indexDetails.Aliases) != 1 {
+   t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+  }
+ }
+
+ // Check the reverse function:
+ indexInfo1, found := aliasesResult2.Indices[testIndexName]
+ if !found {
+  t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+ }
+ aliasFound := indexInfo1.HasAlias(testAliasName)
+ if !aliasFound {
+  t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
+ }
+
+ // Check the reverse function:
+ indexInfo2, found := aliasesResult2.Indices[testIndexName2]
+ if !found {
+  t.Errorf("expected info about index %s = %v; got %v", testIndexName2, true, found)
+ }
+ aliasFound = indexInfo2.HasAlias(testAliasName)
+ if !aliasFound {
+  t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
+ }
+
+ // Remove the alias from the first index; afterwards the alias should
+ // point only at the second index
+ aliasRemove1, err := client.Alias().
+  Remove(testIndexName, testAliasName).
+  //Pretty(true).
+  Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ if !aliasRemove1.Acknowledged {
+  t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+ }
+
+ // Alias should now exist only for index 2
+ aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ if len(aliasesResult3.Indices) != 2 {
+  t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult3.Indices {
+  if indexName == testIndexName {
+   if len(indexDetails.Aliases) != 0 {
+    t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+   }
+  } else if indexName == testIndexName2 {
+   if len(indexDetails.Aliases) != 1 {
+    t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+   }
+  } else {
+   t.Errorf("got unexpected index %s", indexName)
+  }
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping.go
new file mode 100644
index 000000000..5526cfcb8
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping.go
@@ -0,0 +1,170 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesGetMappingService retrieves the mapping definitions for an index or
+// index/type.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html
+// for details.
+type IndicesGetMappingService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ local *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewGetMappingService is an alias for NewIndicesGetMappingService.
+// Use NewIndicesGetMappingService.
+func NewGetMappingService(client *Client) *IndicesGetMappingService {
+ return NewIndicesGetMappingService(client)
+}
+
+// NewIndicesGetMappingService creates a new IndicesGetMappingService.
+func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService {
+ return &IndicesGetMappingService{
+  client: client,
+  index: make([]string, 0),
+  typ: make([]string, 0),
+ }
+}
+
+// Index is a list of index names.
+func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Type is a list of document types.
+func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService {
+ s.typ = append(s.typ, types...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
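+// Accepted values appear to mirror the other indices services in this
+// patch ("open", "closed", "none", "all"); see the get-settings service
+// below, which documents the same parameter.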
+func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) {
+ var index, typ []string
+
+ if len(s.index) > 0 {
+  index = s.index
+ } else {
+  index = []string{"_all"}
+ }
+
+ if len(s.typ) > 0 {
+  typ = s.typ
+ } else {
+  typ = []string{"_all"}
+ }
+
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+  "index": strings.Join(index, ","),
+  "type": strings.Join(typ, ","),
+ })
+ if err != nil {
+  return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+  params.Set("pretty", "1")
+ }
+ if s.ignoreUnavailable != nil {
+  params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+  params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+  params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+  params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetMappingService) Validate() error {
+ return nil
+}
+
+// Do executes the operation. It returns mapping definitions for an index
+// or index/type.
+func (s *IndicesGetMappingService) Do() (map[string]interface{}, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+  return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+  return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+  return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]interface{}
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+  return nil, err
+ }
+ return ret, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping_test.go
new file mode 100644
index 000000000..ccfa27fed
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping_test.go
@@ -0,0 +1,50 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestIndicesGetMappingURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+  Indices []string
+  Types []string
+  Expected string
+ }{
+  {
+   []string{},
+   []string{},
+   "/_all/_mapping/_all",
+  },
+  {
+   []string{},
+   []string{"tweet"},
+   "/_all/_mapping/tweet",
+  },
+  {
+   []string{"twitter"},
+   []string{"tweet"},
+   "/twitter/_mapping/tweet",
+  },
+  {
+   []string{"store-1", "store-2"},
+   []string{"tweet", "user"},
+   "/store-1%2Cstore-2/_mapping/tweet%2Cuser",
+  },
+ }
+
+ for _, test := range tests {
+  path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL()
+  if err != nil {
+   t.Fatal(err)
+  }
+  if path != test.Expected {
+   t.Errorf("expected %q; got: %q", test.Expected, path)
+  }
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings.go
new file mode 100644
index 000000000..4820cb656
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings.go
@@ -0,0 +1,183 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesGetSettingsService allows one to retrieve the settings of one
+// or more indices.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html
+// for more details.
+type IndicesGetSettingsService struct {
+ client *Client
+ pretty bool
+ index []string
+ name []string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ flatSettings *bool
+ local *bool
+}
+
+// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
+func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
+ return &IndicesGetSettingsService{
+  client: client,
+  index: make([]string, 0),
+  name: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Name is a list of the names of the settings that should be included.
+func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
+ s.name = append(s.name, name...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression
+// to concrete indices that are open, closed or both.
+// Options: open, closed, none, all. Default: open,closed.
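+//
+// A minimal sketch (assuming a configured *Client named "client"; the
+// index pattern is a placeholder, and client.IndexGetSettings is the
+// helper exercised in the test file):
+//
+//	res, err := client.IndexGetSettings().Index("twitter-*").ExpandWildcards("open").Do()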
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ var index []string
+
+ if len(s.index) > 0 {
+  index = s.index
+ } else {
+  index = []string{"_all"}
+ }
+
+ if len(s.name) > 0 {
+  // Build URL
+  path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
+   "index": strings.Join(index, ","),
+   "name": strings.Join(s.name, ","),
+  })
+ } else {
+  // Build URL
+  path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+   "index": strings.Join(index, ","),
+  })
+ }
+ if err != nil {
+  return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+  params.Set("pretty", "1")
+ }
+ if s.ignoreUnavailable != nil {
+  params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+  params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+  params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flatSettings != nil {
+  params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+  params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetSettingsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+  return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+  return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+  return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetSettingsResponse
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+  return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
+type IndicesGetSettingsResponse struct {
+ Settings map[string]interface{} `json:"settings"`
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings_test.go
new file mode 100644
index 000000000..f53512d53
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings_test.go
@@ -0,0 +1,81 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestIndexGetSettingsURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+  Indices []string
+  Names []string
+  Expected string
+ }{
+  {
+   []string{},
+   []string{},
+   "/_all/_settings",
+  },
+  {
+   []string{},
+   []string{"index.merge.*"},
+   "/_all/_settings/index.merge.%2A",
+  },
+  {
+   []string{"twitter-*"},
+   []string{"index.merge.*", "_settings"},
+   "/twitter-%2A/_settings/index.merge.%2A%2C_settings",
+  },
+  {
+   []string{"store-1", "store-2"},
+   []string{"index.merge.*", "_settings"},
+   "/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings",
+  },
+ }
+
+ for _, test := range tests {
+  path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL()
+  if err != nil {
+   t.Fatal(err)
+  }
+  if path != test.Expected {
+   t.Errorf("expected %q; got: %q", test.Expected, path)
+  }
+ }
+}
+
+func TestIndexGetSettingsService(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+  t.Fatal(err)
+ }
+ if esversion < "1.4.0" {
+  t.Skip("Index Get Settings API is available since 1.4")
+  return
+ }
+
+ res, err := client.IndexGetSettings().Index(testIndexName).Do()
+ if err != nil {
+  t.Fatal(err)
+ }
+ if res == nil {
+  t.Fatalf("expected result; got: %v", res)
+ }
+ info, found := res[testIndexName]
+ if !found {
+  t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+ }
+ if info == nil {
+  t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+ }
+ if info.Settings == nil {
+  t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings)
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template.go
new file mode 100644
index 000000000..b0e66d3f9
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template.go
@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesGetTemplateService returns an index template.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesGetTemplateService struct {
+ client *Client
+ pretty bool
+ name []string
+ flatSettings *bool
+ local *bool
+}
+
+// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
+func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
+ return &IndicesGetTemplateService{
+  client: client,
+  name: make([]string, 0),
+ }
+}
+
+// Name is a list of index template names to return; if omitted, all
+// templates are returned.
+func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
+ s.name = append(s.name, name...)
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.name) > 0 { + path, err = uritemplates.Expand("/_template/{name}", map[string]string{ + "name": strings.Join(s.name, ","), + }) + } else { + path = "/_template" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetTemplateService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetTemplateResponse + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do. +type IndicesGetTemplateResponse struct { + Order int `json:"order,omitempty"` + Template string `json:"template,omitempty"` + Settings map[string]interface{} `json:"settings,omitempty"` + Mappings map[string]interface{} `json:"mappings,omitempty"` + Aliases map[string]interface{} `json:"aliases,omitempty"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template_test.go new file mode 100644 index 000000000..693cde5ea --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template_test.go @@ -0,0 +1,41 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestIndexGetTemplateURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Names []string + Expected string + }{ + { + []string{}, + "/_template", + }, + { + []string{"index1"}, + "/_template/index1", + }, + { + []string{"index1", "index2"}, + "/_template/index1%2Cindex2", + }, + } + + for _, test := range tests { + path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_test.go new file mode 100644 index 000000000..fcdee54db --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_test.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestIndicesGetValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesGetService(client).Index("").Do() + if err == nil { + t.Fatalf("expected IndicesGet to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} + +func TestIndicesGetURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Features []string + Expected string + }{ + { + []string{}, + []string{}, + "/_all", + }, + { + []string{}, + []string{"_mappings"}, + "/_all/_mappings", + }, + { + []string{"twitter"}, + []string{"_mappings", "_settings"}, + "/twitter/_mappings%2C_settings", + }, + { + []string{"store-1", "store-2"}, + []string{"_mappings", "_settings"}, + "/store-1%2Cstore-2/_mappings%2C_settings", + }, + } + + for _, test := range tests { + path, _, err := NewIndicesGetService(client).Index(test.Indices...).Feature(test.Features...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestIndicesGetService(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.4.0" { + t.Skip("Index Get API is available since 1.4") + return + } + + res, err := client.IndexGet().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected result; got: %v", res) + } + info, found := res[testIndexName] + if !found { + t.Fatalf("expected index %q to be found; got: %v", testIndexName, found) + } + if info == nil { + t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info) + } + if info.Mappings == nil { + t.Errorf("expected mappings to be != nil; got: %v", info.Mappings) + } + if info.Settings == nil { + t.Errorf("expected settings to be != nil; got: %v", info.Settings) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer.go new file mode 100644 index 000000000..29bc6cbfd --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer.go @@ -0,0 +1,194 @@ +// Copyright 2012-2015 
Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesGetWarmerService allows one to get the definition of a warmer for a
+// specific index (or alias, or several indices) based on its name.
+// The provided name can be a simple wildcard expression or omitted to get
+// all warmers.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html
+// for more information.
+type IndicesGetWarmerService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	name              []string
+	typ               []string
+	allowNoIndices    *bool
+	expandWildcards   string
+	ignoreUnavailable *bool
+	local             *bool
+}
+
+// NewIndicesGetWarmerService creates a new IndicesGetWarmerService.
+func NewIndicesGetWarmerService(client *Client) *IndicesGetWarmerService {
+	return &IndicesGetWarmerService{
+		client: client,
+		typ:    make([]string, 0),
+		index:  make([]string, 0),
+		name:   make([]string, 0),
+	}
+}
+
+// Index is a list of index names to restrict the operation; use `_all` to perform the operation on all indices.
+func (s *IndicesGetWarmerService) Index(indices ...string) *IndicesGetWarmerService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Name is the name of the warmer (supports wildcards); leave empty to get all warmers.
+func (s *IndicesGetWarmerService) Name(name ...string) *IndicesGetWarmerService {
+	s.name = append(s.name, name...)
+	return s
+}
+
+// Type is a list of type names to restrict the operation to
+// (supports wildcards); use `_all` or omit to get warmers for all types.
+func (s *IndicesGetWarmerService) Type(typ ...string) *IndicesGetWarmerService {
+	s.typ = append(s.typ, typ...)
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *IndicesGetWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesGetWarmerService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesGetWarmerService) ExpandWildcards(expandWildcards string) *IndicesGetWarmerService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesGetWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetWarmerService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesGetWarmerService) Local(local bool) *IndicesGetWarmerService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetWarmerService) Pretty(pretty bool) *IndicesGetWarmerService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
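+// The path depends on which of index, type, and name are set; for example
+// (matching the accompanying tests):
+//
+//	Name("warmer_1")                             -> /_warmer/warmer_1
+//	Index("test").Name("warmer_1")               -> /test/_warmer/warmer_1
+//	Index("test").Type("tweet").Name("warmer_1") -> /test/tweet/_warmer/warmer_1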
+func (s *IndicesGetWarmerService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) == 0 { + path = "/_warmer" + } else if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) > 0 { + path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ + "name": strings.Join(s.name, ","), + }) + } else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) == 0 { + path, err = uritemplates.Expand("/_all/{type}/_warmer", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ + "type": strings.Join(s.typ, ","), + "name": strings.Join(s.name, ","), + }) + } else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) == 0 { + path, err = uritemplates.Expand("/{index}/_warmer", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) > 0 { + path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "name": strings.Join(s.name, ","), + }) + } else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) == 0 { + path, err = uritemplates.Expand("/{index}/{type}/_warmer", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + "name": strings.Join(s.name, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetWarmerService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesGetWarmerService) Do() (map[string]interface{}, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]interface{} + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer_test.go new file mode 100644 index 000000000..ea01a628e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer_test.go @@ -0,0 +1,83 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
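+//
+// A hypothetical read of a single warmer via the fluent API (assumes an
+// initialized *Client named client):
+//
+//	warmers, err := client.GetWarmer().Index("test").Name("warmer_1").Do()
+//	if err == nil {
+//		_ = warmers // map[string]interface{} keyed by index name
+//	}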
+ +package elastic + +import "testing" + +func TestGetWarmerBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Names []string + Expected string + }{ + { + []string{}, + []string{}, + []string{}, + "/_warmer", + }, + { + []string{}, + []string{}, + []string{"warmer_1"}, + "/_warmer/warmer_1", + }, + { + []string{}, + []string{"tweet"}, + []string{}, + "/_all/tweet/_warmer", + }, + { + []string{}, + []string{"tweet"}, + []string{"warmer_1"}, + "/_all/tweet/_warmer/warmer_1", + }, + { + []string{"test"}, + []string{}, + []string{}, + "/test/_warmer", + }, + { + []string{"test"}, + []string{}, + []string{"warmer_1"}, + "/test/_warmer/warmer_1", + }, + { + []string{"*"}, + []string{}, + []string{"warmer_1"}, + "/%2A/_warmer/warmer_1", + }, + { + []string{"test"}, + []string{"tweet"}, + []string{"warmer_1"}, + "/test/tweet/_warmer/warmer_1", + }, + { + []string{"index-1", "index-2"}, + []string{"type-1", "type-2"}, + []string{"warmer_1", "warmer_2"}, + "/index-1%2Cindex-2/type-1%2Ctype-2/_warmer/warmer_1%2Cwarmer_2", + }, + } + + for _, test := range tests { + path, _, err := client.GetWarmer().Index(test.Indices...).Type(test.Types...).Name(test.Names...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_open.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_open.go new file mode 100644 index 000000000..85a45bb1d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_open.go @@ -0,0 +1,157 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesOpenService opens an index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html +// for details. +type IndicesOpenService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesOpenService creates and initializes a new IndicesOpenService. +func NewIndicesOpenService(client *Client) *IndicesOpenService { + return &IndicesOpenService{client: client} +} + +// Index is the name of the index to open. +func (s *IndicesOpenService) Index(index string) *IndicesOpenService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). 
+func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesOpenService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_open", map[string]string{ + "index": s.index, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesOpenService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesOpenService) Do() (*IndicesOpenResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesOpenResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesOpenResponse is the response of IndicesOpenService.Do. +type IndicesOpenResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_open_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_open_test.go new file mode 100644 index 000000000..352bb479b --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_open_test.go @@ -0,0 +1,20 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
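+//
+// A hypothetical open call using the service directly (assumes an
+// initialized *Client named client and an existing, closed index):
+//
+//	res, err := NewIndicesOpenService(client).Index("twitter").Do()
+//	if err == nil && res.Acknowledged {
+//		// the index is being opened
+//	}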
+ +package elastic + +import "testing" + +func TestIndicesOpenValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesOpenService(client).Do() + if err == nil { + t.Fatalf("expected IndicesOpen to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias.go new file mode 100644 index 000000000..d8515036b --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias.go @@ -0,0 +1,111 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" +) + +type AliasService struct { + client *Client + actions []aliasAction + pretty bool +} + +type aliasAction struct { + // "add" or "remove" + Type string + // Index name + Index string + // Alias name + Alias string + // Filter + Filter Query +} + +func NewAliasService(client *Client) *AliasService { + builder := &AliasService{ + client: client, + actions: make([]aliasAction, 0), + } + return builder +} + +func (s *AliasService) Pretty(pretty bool) *AliasService { + s.pretty = pretty + return s +} + +func (s *AliasService) Add(indexName string, aliasName string) *AliasService { + action := aliasAction{Type: "add", Index: indexName, Alias: aliasName} + s.actions = append(s.actions, action) + return s +} + +func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService { + action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter} + s.actions = append(s.actions, action) + return s +} + +func (s *AliasService) Remove(indexName string, aliasName string) *AliasService { + action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName} + s.actions = append(s.actions, action) + return s +} + +func (s *AliasService) Do() (*AliasResult, error) { + // Build url + path := "/_aliases" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Actions + body := make(map[string]interface{}) + actionsJson := make([]interface{}, 0) + + for _, action := range s.actions { + actionJson := make(map[string]interface{}) + detailsJson := make(map[string]interface{}) + detailsJson["index"] = action.Index + detailsJson["alias"] = action.Alias + if action.Filter != nil { + src, err := action.Filter.Source() + if err != nil { + return nil, err + } + detailsJson["filter"] = src + } + actionJson[action.Type] = detailsJson + actionsJson = append(actionsJson, actionJson) + } + + body["actions"] = actionsJson + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(AliasResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of an alias request. 
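+//
+// AliasResult below only reports acknowledgement. A hypothetical atomic
+// alias flip that produces it (assumes an initialized *Client named client);
+// both actions are applied in a single _aliases request:
+//
+//	res, err := client.Alias().
+//		Add("index-v2", "my-alias").
+//		Remove("index-v1", "my-alias").
+//		Do()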
+ +type AliasResult struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias_test.go new file mode 100644 index 000000000..3e4e797b0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias_test.go @@ -0,0 +1,123 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +const ( + testAliasName = "elastic-test-alias" +) + +func TestAliasLifecycle(t *testing.T) { + var err error + + client := setupTestClientAndCreateIndex(t) + + // Some tweets + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."} + tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."} + + // Add tweets to first index + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + // Add tweets to second index + _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + + /* + // Alias should not yet exist + aliasesResult1, err := client.Aliases().Do() + if err != nil { + t.Fatal(err) + } + if len(aliasesResult1.Indices) != 0 { + t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 0, len(aliasesResult1.Indices)) + } + */ + + // Add both indices to a new alias + aliasCreate, err := client.Alias(). + Add(testIndexName, testAliasName). + Add(testIndexName2, testAliasName). + //Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if !aliasCreate.Acknowledged { + t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged) + } + + // Search should return all 3 tweets + matchAll := NewMatchAllQuery() + searchResult1, err := client.Search().Index(testAliasName).Query(matchAll).Do() + if err != nil { + t.Fatal(err) + } + if searchResult1.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult1.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits) + } + + /* + // Alias should return both indices + aliasesResult2, err := client.Aliases().Do() + if err != nil { + t.Fatal(err) + } + if len(aliasesResult2.Indices) != 2 { + t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices)) + } + */ + + // Remove first index should remove two tweets, so should only yield 1 + aliasRemove1, err := client.Alias(). + Remove(testIndexName, testAliasName). + //Pretty(true). 
+ Do() + if err != nil { + t.Fatal(err) + } + if !aliasRemove1.Acknowledged { + t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged) + } + + searchResult2, err := client.Search().Index(testAliasName).Query(matchAll).Do() + if err != nil { + t.Fatal(err) + } + if searchResult2.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult2.Hits.TotalHits != 1 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits) + } + +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping.go new file mode 100644 index 000000000..5a23165b0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping.go @@ -0,0 +1,221 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesPutMappingService allows to register specific mapping definition +// for a specific type. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html +// for details. +type IndicesPutMappingService struct { + client *Client + pretty bool + typ string + index []string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + ignoreConflicts *bool + timeout string + bodyJson map[string]interface{} + bodyString string +} + +// NewPutMappingService is an alias for NewIndicesPutMappingService. +// Use NewIndicesPutMappingService. +func NewPutMappingService(client *Client) *IndicesPutMappingService { + return NewIndicesPutMappingService(client) +} + +// NewIndicesPutMappingService creates a new IndicesPutMappingService. +func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService { + return &IndicesPutMappingService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService { + s.index = append(s.index, indices...) + return s +} + +// Type is the name of the document type. +func (s *IndicesPutMappingService) Type(typ string) *IndicesPutMappingService { + s.typ = typ + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. 
+func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreConflicts specifies whether to ignore conflicts while updating +// the mapping (default: false). +func (s *IndicesPutMappingService) IgnoreConflicts(ignoreConflicts bool) *IndicesPutMappingService { + s.ignoreConflicts = &ignoreConflicts + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService { + s.pretty = pretty + return s +} + +// BodyJson contains the mapping definition. +func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService { + s.bodyJson = mapping + return s +} + +// BodyString is the mapping definition serialized as a string. +func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService { + s.bodyString = mapping + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL: Typ MUST be specified and is verified in Validate. + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": s.typ, + }) + } else { + path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{ + "type": s.typ, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreConflicts != nil { + params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutMappingService) Validate() error { + var invalid []string + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
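+// A hypothetical call (assumes an initialized *Client named client); Type
+// and a body are required, per Validate above:
+//
+//	mapping := `{"tweet":{"properties":{"message":{"type":"string"}}}}`
+//	res, err := client.PutMapping().Index("twitter").Type("tweet").BodyString(mapping).Do()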
+func (s *IndicesPutMappingService) Do() (*PutMappingResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PutMappingResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PutMappingResponse is the response of IndicesPutMappingService.Do. +type PutMappingResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping_test.go new file mode 100644 index 000000000..356aa2728 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping_test.go @@ -0,0 +1,82 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestPutMappingURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Type string + Expected string + }{ + { + []string{}, + "tweet", + "/_mapping/tweet", + }, + { + []string{"*"}, + "tweet", + "/%2A/_mapping/tweet", + }, + { + []string{"store-1", "store-2"}, + "tweet", + "/store-1%2Cstore-2/_mapping/tweet", + }, + } + + for _, test := range tests { + path, _, err := client.PutMapping().Index(test.Indices...).Type(test.Type).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestMappingLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + mapping := `{ + "tweetdoc":{ + "properties":{ + "message":{ + "type":"string" + } + } + } + }` + + putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do() + if err != nil { + t.Fatalf("expected put mapping to succeed; got: %v", err) + } + if putresp == nil { + t.Fatalf("expected put mapping response; got: %v", putresp) + } + if !putresp.Acknowledged { + t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged) + } + + getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do() + if err != nil { + t.Fatalf("expected get mapping to succeed; got: %v", err) + } + if getresp == nil { + t.Fatalf("expected get mapping response; got: %v", getresp) + } + props, ok := getresp[testIndexName2] + if !ok { + t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props) + } + + // NOTE There is no Delete Mapping API in Elasticsearch 2.0 +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings.go new file mode 100644 index 000000000..4cdd3e1cb --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings.go @@ -0,0 +1,184 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesPutSettingsService changes specific index level settings in +// real time. +// +// See the documentation at +// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html. +type IndicesPutSettingsService struct { + client *Client + pretty bool + index []string + allowNoIndices *bool + expandWildcards string + flatSettings *bool + ignoreUnavailable *bool + masterTimeout string + bodyJson interface{} + bodyString string +} + +// NewIndicesPutSettingsService creates a new IndicesPutSettingsService. +func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService { + return &IndicesPutSettingsService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService { + s.index = append(s.index, indices...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` +// string or when no indices have been specified). +func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards specifies whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService { + s.expandWildcards = expandWildcards + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService { + s.flatSettings = &flatSettings + return s +} + +// IgnoreUnavailable specifies whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// MasterTimeout is the timeout for connection to master. +func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: The index settings to be updated. +func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService { + s.bodyJson = body + return s +} + +// BodyString is documented as: The index settings to be updated. +func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
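+// For example (matching the accompanying tests), no index yields /_settings,
+// while Index("store-1", "store-2") yields /store-1%2Cstore-2/_settings.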
+func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_settings", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_settings" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutSettingsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesPutSettingsService) Do() (*IndicesPutSettingsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesPutSettingsResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do. +type IndicesPutSettingsResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings_test.go new file mode 100644 index 000000000..4bc86e18e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings_test.go @@ -0,0 +1,92 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
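+//
+// A hypothetical dynamic settings update (assumes an initialized *Client
+// named client):
+//
+//	body := `{"index":{"refresh_interval":"-1"}}`
+//	res, err := client.IndexPutSettings().Index("twitter").BodyString(body).Do()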
+
+package elastic
+
+import "testing"
+
+func TestIndicesPutSettingsBuildURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Expected string
+	}{
+		{
+			[]string{},
+			"/_settings",
+		},
+		{
+			[]string{"*"},
+			"/%2A/_settings",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			"/store-1%2Cstore-2/_settings",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.IndexPutSettings().Index(test.Indices...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+func TestIndicesSettingsLifecycle(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	body := `{
+		"index":{
+			"refresh_interval":"-1"
+		}
+	}`
+
+	// Put settings
+	putres, err := client.IndexPutSettings().Index(testIndexName).BodyString(body).Do()
+	if err != nil {
+		t.Fatalf("expected put settings to succeed; got: %v", err)
+	}
+	if putres == nil {
+		t.Fatalf("expected put settings response; got: %v", putres)
+	}
+	if !putres.Acknowledged {
+		t.Fatalf("expected put settings ack; got: %v", putres.Acknowledged)
+	}
+
+	// Read settings
+	getres, err := client.IndexGetSettings().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatalf("expected get settings to succeed; got: %v", err)
+	}
+	if getres == nil {
+		t.Fatalf("expected get settings response; got: %v", getres)
+	}
+
+	// Check settings
+	index, found := getres[testIndexName]
+	if !found {
+		t.Fatalf("expected to return settings for index %q; got: %#v", testIndexName, getres)
+	}
+	// Retrieve "index" section of the settings for index testIndexName
+	sectionIntf, ok := index.Settings["index"]
+	if !ok {
+		t.Fatalf("expected settings to have %q field; got: %#v", "index", getres)
+	}
+	section, ok := sectionIntf.(map[string]interface{})
+	if !ok {
+		t.Fatalf("expected settings to be of type map[string]interface{}; got: %#v", getres)
+	}
+	refintv, ok := section["refresh_interval"]
+	if !ok {
+		t.Fatalf(`expected JSON to include "refresh_interval" field; got: %#v`, getres)
+	}
+	if got, want := refintv, "-1"; got != want {
+		t.Fatalf("expected refresh_interval = %v; got: %v", want, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_template.go
new file mode 100644
index 000000000..72947f311
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_template.go
@@ -0,0 +1,179 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesPutTemplateService creates or updates index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesPutTemplateService struct {
+	client        *Client
+	pretty        bool
+	name          string
+	order         interface{}
+	create        *bool
+	timeout       string
+	masterTimeout string
+	flatSettings  *bool
+	bodyJson      interface{}
+	bodyString    string
+}
+
+// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
+func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
+	return &IndicesPutTemplateService{
+		client: client,
+	}
+}
+
+// Name is the name of the index template.
+func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService { + s.flatSettings = &flatSettings + return s +} + +// Order is the order for this template when merging multiple matching ones +// (higher numbers are merged later, overriding the lower numbers). +func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService { + s.order = order + return s +} + +// Create indicates whether the index template should only be added if +// new or can also replace an existing one. +func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService { + s.create = &create + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: The template definition. +func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService { + s.bodyJson = body + return s +} + +// BodyString is documented as: The template definition. +func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.order != nil { + params.Set("order", fmt.Sprintf("%v", s.order)) + } + if s.create != nil { + params.Set("create", fmt.Sprintf("%v", *s.create)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
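+// A hypothetical template registration (assumes an initialized *Client named
+// client); Name and a body are required, per Validate above:
+//
+//	tmpl := `{"template":"te*","settings":{"number_of_shards":1}}`
+//	res, err := NewIndicesPutTemplateService(client).Name("template_1").BodyString(tmpl).Do()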
+func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(IndicesPutTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
+type IndicesPutTemplateResponse struct {
+	Acknowledged bool `json:"acknowledged,omitempty"`
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer.go
new file mode 100644
index 000000000..6e1f3ae66
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer.go
@@ -0,0 +1,222 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesPutWarmerService allows one to register a warmer.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html.
+type IndicesPutWarmerService struct {
+	client            *Client
+	pretty            bool
+	typ               []string
+	index             []string
+	name              string
+	masterTimeout     string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	requestCache      *bool
+	expandWildcards   string
+	bodyJson          map[string]interface{}
+	bodyString        string
+}
+
+// NewIndicesPutWarmerService creates a new IndicesPutWarmerService.
+func NewIndicesPutWarmerService(client *Client) *IndicesPutWarmerService {
+	return &IndicesPutWarmerService{
+		client: client,
+		index:  make([]string, 0),
+		typ:    make([]string, 0),
+	}
+}
+
+// Index is a list of index names the warmer should be registered for
+// (supports wildcards); use `_all` or omit to register it for all indices.
+func (s *IndicesPutWarmerService) Index(indices ...string) *IndicesPutWarmerService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Type is a list of type names the warmer should be registered for
+// (supports wildcards); use `_all` or omit to register it for all types.
+func (s *IndicesPutWarmerService) Type(typ ...string) *IndicesPutWarmerService {
+	s.typ = append(s.typ, typ...)
+	return s
+}
+
+// Name specifies the name of the warmer. It is required for this operation.
+func (s *IndicesPutWarmerService) Name(name string) *IndicesPutWarmerService {
+	s.name = name
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesPutWarmerService) MasterTimeout(masterTimeout string) *IndicesPutWarmerService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesPutWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutWarmerService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. +func (s *IndicesPutWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesPutWarmerService { + s.allowNoIndices = &allowNoIndices + return s +} + +// RequestCache specifies whether the request to be warmed should use the request cache, +// defaults to index level setting +func (s *IndicesPutWarmerService) RequestCache(requestCache bool) *IndicesPutWarmerService { + s.requestCache = &requestCache + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutWarmerService) ExpandWildcards(expandWildcards string) *IndicesPutWarmerService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutWarmerService) Pretty(pretty bool) *IndicesPutWarmerService { + s.pretty = pretty + return s +} + +// BodyJson contains the mapping definition. +func (s *IndicesPutWarmerService) BodyJson(mapping map[string]interface{}) *IndicesPutWarmerService { + s.bodyJson = mapping + return s +} + +// BodyString is the mapping definition serialized as a string. +func (s *IndicesPutWarmerService) BodyString(mapping string) *IndicesPutWarmerService { + s.bodyString = mapping + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesPutWarmerService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) == 0 && len(s.typ) == 0 { + path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ + "name": s.name, + }) + } else if len(s.index) == 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ + "type": strings.Join(s.typ, ","), + "name": s.name, + }) + } else if len(s.index) > 0 && len(s.typ) == 0 { + path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "name": s.name, + }) + } else { + path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + "name": s.name, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
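+// Name and a body (BodyJson or BodyString) are required. A hypothetical
+// registration (assumes an initialized *Client named client):
+//
+//	warmer := `{"query":{"match_all":{}}}`
+//	res, err := client.PutWarmer().Index("test").Type("tweet").Name("warmer_1").BodyString(warmer).Do()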
+func (s *IndicesPutWarmerService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesPutWarmerService) Do() (*PutWarmerResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PutWarmerResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PutWarmerResponse is the response of IndicesPutWarmerService.Do. +type PutWarmerResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer_test.go new file mode 100644 index 000000000..25a1f3ecb --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer_test.go @@ -0,0 +1,100 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestPutWarmerBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Name string + Expected string + }{ + { + []string{}, + []string{}, + "warmer_1", + "/_warmer/warmer_1", + }, + { + []string{"*"}, + []string{}, + "warmer_1", + "/%2A/_warmer/warmer_1", + }, + { + []string{}, + []string{"*"}, + "warmer_1", + "/_all/%2A/_warmer/warmer_1", + }, + { + []string{"index-1", "index-2"}, + []string{"type-1", "type-2"}, + "warmer_1", + "/index-1%2Cindex-2/type-1%2Ctype-2/_warmer/warmer_1", + }, + } + + for _, test := range tests { + path, _, err := client.PutWarmer().Index(test.Indices...).Type(test.Types...).Name(test.Name).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestWarmerLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + mapping := `{ + "query": { + "match_all": {} + } + }` + + // Ensure well prepared test index + client.Flush(testIndexName2).Do() + + putresp, err := client.PutWarmer().Index(testIndexName2).Type("tweet").Name("warmer_1").BodyString(mapping).Do() + if err != nil { + t.Fatalf("expected put warmer to succeed; got: %v", err) + } + if putresp == nil { + t.Fatalf("expected put warmer response; got: %v", putresp) + } + if !putresp.Acknowledged { + t.Fatalf("expected put warmer ack; got: %v", putresp.Acknowledged) + } + + getresp, err := client.GetWarmer().Index(testIndexName2).Name("warmer_1").Do() + if err != nil { + t.Fatalf("expected get warmer to succeed; got: %v", err) + } + if getresp == nil { + t.Fatalf("expected get warmer response; got: %v", getresp) + } + props, ok := getresp[testIndexName2] + if !ok { + 
t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props)
+	}
+
+	delresp, err := client.DeleteWarmer().Index(testIndexName2).Name("warmer_1").Do()
+	if err != nil {
+		t.Fatalf("expected del warmer to succeed; got: %v", err)
+	}
+	if delresp == nil {
+		t.Fatalf("expected del warmer response; got: %v", delresp)
+	}
+	if !delresp.Acknowledged {
+		t.Fatalf("expected del warmer ack; got: %v", delresp.Acknowledged)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh.go
new file mode 100644
index 000000000..392a8d393
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh.go
@@ -0,0 +1,94 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// RefreshService explicitly refreshes one or more indices.
+type RefreshService struct {
+	client  *Client
+	indices []string
+	force   *bool
+	pretty  bool
+}
+
+// NewRefreshService creates a new RefreshService.
+func NewRefreshService(client *Client) *RefreshService {
+	builder := &RefreshService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+	return builder
+}
+
+// Index sets the indices to refresh.
+func (s *RefreshService) Index(indices ...string) *RefreshService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Force specifies whether to force a refresh via the `force` parameter.
+func (s *RefreshService) Force(force bool) *RefreshService {
+	s.force = &force
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *RefreshService) Pretty(pretty bool) *RefreshService {
+	s.pretty = pretty
+	return s
+}
+
+// Do executes the operation.
+func (s *RefreshService) Do() (*RefreshResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	path += "/_refresh"
+
+	// Parameters
+	params := make(url.Values)
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(RefreshResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a refresh request.
+
+type RefreshResult struct {
+	Shards shardsInfo `json:"_shards,omitempty"`
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh_test.go
new file mode 100644
index 000000000..885e63365
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
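+//
+// A hypothetical refresh of two indices (assumes an initialized *Client
+// named client):
+//
+//	res, err := client.Refresh("twitter", "comments").Do()
+//	if err == nil {
+//		_ = res.Shards // per-shard refresh information
+//	}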
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestRefresh(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add some documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Refresh indices
+	res, err := client.Refresh(testIndexName, testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatal("expected result; got nil")
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats.go
new file mode 100644
index 000000000..b9255c094
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats.go
@@ -0,0 +1,385 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesStatsService provides stats on various metrics of one or more
+// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html.
+type IndicesStatsService struct {
+	client           *Client
+	pretty           bool
+	metric           []string
+	index            []string
+	level            string
+	types            []string
+	completionFields []string
+	fielddataFields  []string
+	fields           []string
+	groups           []string
+	human            *bool
+}
+
+// NewIndicesStatsService creates a new IndicesStatsService.
+func NewIndicesStatsService(client *Client) *IndicesStatsService {
+	return &IndicesStatsService{
+		client:           client,
+		index:            make([]string, 0),
+		metric:           make([]string, 0),
+		completionFields: make([]string, 0),
+		fielddataFields:  make([]string, 0),
+		fields:           make([]string, 0),
+		groups:           make([]string, 0),
+		types:            make([]string, 0),
+	}
+}
+
+// Metric limits the information returned to the specific metrics. Options are:
+// docs, store, indexing, get, search, completion, fielddata, flush, merge,
+// query_cache, refresh, suggest, and warmer.
+func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService {
+	s.metric = append(s.metric, metric...)
+	return s
+}
+
+// Index is the list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Type is a list of document types for the `indexing` index metric.
+func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService {
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Level returns stats aggregated at cluster, index or shard level.
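+// Given the three levels above, the corresponding parameter values are
+// presumably "cluster", "indices" and "shards". A hypothetical call
+// (the index name is illustrative):
+//
+//	stats, err := client.IndexStats("twitter").Level("shards").Do()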
+func (s *IndicesStatsService) Level(level string) *IndicesStatsService {
+	s.level = level
+	return s
+}
+
+// CompletionFields is a list of fields for `fielddata` and `suggest`
+// index metric (supports wildcards).
+func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService {
+	s.completionFields = append(s.completionFields, completionFields...)
+	return s
+}
+
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService {
+	s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+	return s
+}
+
+// Fields is a list of fields for `fielddata` and `completion` index metric
+// (supports wildcards).
+func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService {
+	s.fields = append(s.fields, fields...)
+	return s
+}
+
+// Groups is a list of search groups for `search` index metric.
+func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService {
+	s.groups = append(s.groups, groups...)
+	return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *IndicesStatsService) Human(human bool) *IndicesStatsService {
+	s.human = &human
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesStatsService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+	if len(s.index) > 0 && len(s.metric) > 0 {
+		path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{
+			"index":  strings.Join(s.index, ","),
+			"metric": strings.Join(s.metric, ","),
+		})
+	} else if len(s.index) > 0 {
+		path, err = uritemplates.Expand("/{index}/_stats", map[string]string{
+			"index": strings.Join(s.index, ","),
+		})
+	} else if len(s.metric) > 0 {
+		path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{
+			"metric": strings.Join(s.metric, ","),
+		})
+	} else {
+		path = "/_stats"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if len(s.groups) > 0 {
+		params.Set("groups", strings.Join(s.groups, ","))
+	}
+	if s.human != nil {
+		params.Set("human", fmt.Sprintf("%v", *s.human))
+	}
+	if s.level != "" {
+		params.Set("level", s.level)
+	}
+	if len(s.types) > 0 {
+		params.Set("types", strings.Join(s.types, ","))
+	}
+	if len(s.completionFields) > 0 {
+		params.Set("completion_fields", strings.Join(s.completionFields, ","))
+	}
+	if len(s.fielddataFields) > 0 {
+		params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+	}
+	if len(s.fields) > 0 {
+		params.Set("fields", strings.Join(s.fields, ","))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesStatsService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
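+//
+// A sketch of a full round trip (the index name is illustrative and
+// error handling is elided):
+//
+//	res, _ := client.IndexStats("twitter").Do()
+//	count := res.Indices["twitter"].Total.Docs.Count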
+func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesStatsResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesStatsResponse is the response of IndicesStatsService.Do. +type IndicesStatsResponse struct { + // Shards provides information returned from shards. + Shards shardsInfo `json:"_shards"` + + // All provides summary stats about all indices. + All *IndexStats `json:"_all,omitempty"` + + // Indices provides a map into the stats of an index. The key of the + // map is the index name. + Indices map[string]*IndexStats `json:"indices,omitempty"` +} + +// IndexStats is index stats for a specific index. +type IndexStats struct { + Primaries *IndexStatsDetails `json:"primaries,omitempty"` + Total *IndexStatsDetails `json:"total,omitempty"` +} + +type IndexStatsDetails struct { + Docs *IndexStatsDocs `json:"docs,omitempty"` + Store *IndexStatsStore `json:"store,omitempty"` + Indexing *IndexStatsIndexing `json:"indexing,omitempty"` + Get *IndexStatsGet `json:"get,omitempty"` + Search *IndexStatsSearch `json:"search,omitempty"` + Merges *IndexStatsMerges `json:"merges,omitempty"` + Refresh *IndexStatsRefresh `json:"refresh,omitempty"` + Flush *IndexStatsFlush `json:"flush,omitempty"` + Warmer *IndexStatsWarmer `json:"warmer,omitempty"` + FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"` + IdCache *IndexStatsIdCache `json:"id_cache,omitempty"` + Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"` + Percolate *IndexStatsPercolate `json:"percolate,omitempty"` + Completion *IndexStatsCompletion `json:"completion,omitempty"` + Segments *IndexStatsSegments `json:"segments,omitempty"` + Translog *IndexStatsTranslog `json:"translog,omitempty"` + Suggest *IndexStatsSuggest `json:"suggest,omitempty"` + QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"` +} + +type IndexStatsDocs struct { + Count int64 `json:"count,omitempty"` + Deleted int64 `json:"deleted,omitempty"` +} + +type IndexStatsStore struct { + Size string `json:"size,omitempty"` // human size, e.g. 119.3mb + SizeInBytes int64 `json:"size_in_bytes,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 
0s + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsIndexing struct { + IndexTotal int64 `json:"index_total,omitempty"` + IndexTime string `json:"index_time,omitempty"` + IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"` + IndexCurrent int64 `json:"index_current,omitempty"` + DeleteTotal int64 `json:"delete_total,omitempty"` + DeleteTime string `json:"delete_time,omitempty"` + DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"` + DeleteCurrent int64 `json:"delete_current,omitempty"` + NoopUpdateTotal int64 `json:"noop_update_total,omitempty"` + IsThrottled bool `json:"is_throttled,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsGet struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + ExistsTotal int64 `json:"exists_total,omitempty"` + ExistsTime string `json:"exists_time,omitempty"` + ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"` + MissingTotal int64 `json:"missing_total,omitempty"` + MissingTime string `json:"missing_time,omitempty"` + MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsSearch struct { + OpenContexts int64 `json:"open_contexts,omitempty"` + QueryTotal int64 `json:"query_total,omitempty"` + QueryTime string `json:"query_time,omitempty"` + QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"` + QueryCurrent int64 `json:"query_current,omitempty"` + FetchTotal int64 `json:"fetch_total,omitempty"` + FetchTime string `json:"fetch_time,omitempty"` + FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"` + FetchCurrent int64 `json:"fetch_current,omitempty"` +} + +type IndexStatsMerges struct { + Current int64 `json:"current,omitempty"` + CurrentDocs int64 `json:"current_docs,omitempty"` + CurrentSize string `json:"current_size,omitempty"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` + TotalDocs int64 `json:"total_docs,omitempty"` + TotalSize string `json:"total_size,omitempty"` + TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"` +} + +type IndexStatsRefresh struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFlush struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsWarmer struct { + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFilterCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsIdCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` +} + +type IndexStatsFielddata struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 
`json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsPercolate struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Queries int64 `json:"queries,omitempty"` +} + +type IndexStatsCompletion struct { + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSegments struct { + Count int64 `json:"count,omitempty"` + Memory string `json:"memory,omitempty"` + MemoryInBytes int64 `json:"memory_in_bytes,omitempty"` + IndexWriterMemory string `json:"index_writer_memory,omitempty"` + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"` + IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"` + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"` + VersionMapMemory string `json:"version_map_memory,omitempty"` + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"` + FixedBitSetMemory string `json:"fixed_bit_set,omitempty"` + FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"` +} + +type IndexStatsTranslog struct { + Operations int64 `json:"operations,omitempty"` + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSuggest struct { + Total int64 `json:"total,omitempty"` + Time string `json:"time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsQueryCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` + HitCount int64 `json:"hit_count,omitempty"` + MissCount int64 `json:"miss_count,omitempty"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats_test.go new file mode 100644 index 000000000..2a72858d7 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats_test.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestIndexStatsBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Metrics []string + Expected string + }{ + { + []string{}, + []string{}, + "/_stats", + }, + { + []string{"index1"}, + []string{}, + "/index1/_stats", + }, + { + []string{}, + []string{"metric1"}, + "/_stats/metric1", + }, + { + []string{"index1"}, + []string{"metric1"}, + "/index1/_stats/metric1", + }, + { + []string{"index1", "index2"}, + []string{"metric1"}, + "/index1%2Cindex2/_stats/metric1", + }, + { + []string{"index1", "index2"}, + []string{"metric1", "metric2"}, + "/index1%2Cindex2/_stats/metric1%2Cmetric2", + }, + } + + for i, test := range tests { + path, _, err := client.IndexStats().Index(test.Indices...).Metric(test.Metrics...).buildURL() + if err != nil { + t.Fatalf("case #%d: %v", i+1, err) + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestIndexStats(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + stats, err := client.IndexStats(testIndexName).Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if stats == nil { + t.Fatalf("expected response; got: %v", stats) + } + stat, found := stats.Indices[testIndexName] + if !found { + t.Fatalf("expected stats about index %q; got: %v", testIndexName, found) + } + if stat.Total == nil { + t.Fatalf("expected total to be != nil; got: %v", stat.Total) + } + if stat.Total.Docs == nil { + t.Fatalf("expected total docs to be != nil; got: %v", stat.Total.Docs) + } + if stat.Total.Docs.Count == 0 { + t.Fatalf("expected total docs count to be > 0; got: %d", stat.Total.Docs.Count) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit.go b/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit.go new file mode 100644 index 000000000..1330df1ee --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit.go @@ -0,0 +1,160 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// InnerHit implements a simple join for parent/child, nested, and even +// top-level documents in Elasticsearch. +// It is an experimental feature for Elasticsearch versions 1.5 (or greater). +// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html +// for documentation. +// +// See the tests for SearchSource, HasChildFilter, HasChildQuery, +// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery +// for usage examples. +type InnerHit struct { + source *SearchSource + path string + typ string + + name string +} + +// NewInnerHit creates a new InnerHit. 
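+//
+// A minimal sketch (path, size and sort field are illustrative):
+//
+//	hit := NewInnerHit().Path("comments").Size(3).Sort("created", false)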
+func NewInnerHit() *InnerHit { + return &InnerHit{source: NewSearchSource()} +} + +func (hit *InnerHit) Path(path string) *InnerHit { + hit.path = path + return hit +} + +func (hit *InnerHit) Type(typ string) *InnerHit { + hit.typ = typ + return hit +} + +func (hit *InnerHit) Query(query Query) *InnerHit { + hit.source.Query(query) + return hit +} + +func (hit *InnerHit) From(from int) *InnerHit { + hit.source.From(from) + return hit +} + +func (hit *InnerHit) Size(size int) *InnerHit { + hit.source.Size(size) + return hit +} + +func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit { + hit.source.TrackScores(trackScores) + return hit +} + +func (hit *InnerHit) Explain(explain bool) *InnerHit { + hit.source.Explain(explain) + return hit +} + +func (hit *InnerHit) Version(version bool) *InnerHit { + hit.source.Version(version) + return hit +} + +func (hit *InnerHit) Field(fieldName string) *InnerHit { + hit.source.Field(fieldName) + return hit +} + +func (hit *InnerHit) Fields(fieldNames ...string) *InnerHit { + hit.source.Fields(fieldNames...) + return hit +} + +func (hit *InnerHit) NoFields() *InnerHit { + hit.source.NoFields() + return hit +} + +func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit { + hit.source.FetchSource(fetchSource) + return hit +} + +func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit { + hit.source.FetchSourceContext(fetchSourceContext) + return hit +} + +func (hit *InnerHit) FieldDataFields(fieldDataFields ...string) *InnerHit { + hit.source.FieldDataFields(fieldDataFields...) + return hit +} + +func (hit *InnerHit) FieldDataField(fieldDataField string) *InnerHit { + hit.source.FieldDataField(fieldDataField) + return hit +} + +func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit { + hit.source.ScriptFields(scriptFields...) + return hit +} + +func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit { + hit.source.ScriptField(scriptField) + return hit +} + +func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit { + hit.source.Sort(field, ascending) + return hit +} + +func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit { + hit.source.SortWithInfo(info) + return hit +} + +func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit { + hit.source.SortBy(sorter...) + return hit +} + +func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit { + hit.source.Highlight(highlight) + return hit +} + +func (hit *InnerHit) Highlighter() *Highlight { + return hit.source.Highlighter() +} + +func (hit *InnerHit) Name(name string) *InnerHit { + hit.name = name + return hit +} + +func (hit *InnerHit) Source() (interface{}, error) { + src, err := hit.source.Source() + if err != nil { + return nil, err + } + source, ok := src.(map[string]interface{}) + if !ok { + return nil, nil + } + + // Notice that hit.typ and hit.path are not exported here. + // They are only used with SearchSource and serialized there. + + if hit.name != "" { + source["name"] = hit.name + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit_test.go new file mode 100644 index 000000000..c4a74dafa --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestInnerHitEmpty(t *testing.T) { + hit := NewInnerHit() + src, err := hit.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestInnerHitWithName(t *testing.T) { + hit := NewInnerHit().Name("comments") + src, err := hit.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":"comments"}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/logger.go b/services/templeton/vendor/src/github.com/olivere/elastic/logger.go new file mode 100644 index 000000000..0fb16b19f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/logger.go @@ -0,0 +1,10 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Logger specifies the interface for all log operations. +type Logger interface { + Printf(format string, v ...interface{}) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/mget.go b/services/templeton/vendor/src/github.com/olivere/elastic/mget.go new file mode 100644 index 000000000..6cc6b8d22 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/mget.go @@ -0,0 +1,219 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" +) + +// MgetService allows to get multiple documents based on an index, +// type (optional) and id (possibly routing). The response includes +// a docs array with all the fetched documents, each element similar +// in structure to a document provided by the Get API. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html +// for details. +type MgetService struct { + client *Client + pretty bool + preference string + realtime *bool + refresh *bool + items []*MultiGetItem +} + +func NewMgetService(client *Client) *MgetService { + builder := &MgetService{ + client: client, + items: make([]*MultiGetItem, 0), + } + return builder +} + +func (b *MgetService) Preference(preference string) *MgetService { + b.preference = preference + return b +} + +func (b *MgetService) Refresh(refresh bool) *MgetService { + b.refresh = &refresh + return b +} + +func (b *MgetService) Realtime(realtime bool) *MgetService { + b.realtime = &realtime + return b +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *MgetService) Pretty(pretty bool) *MgetService { + s.pretty = pretty + return s +} + +func (b *MgetService) Add(items ...*MultiGetItem) *MgetService { + b.items = append(b.items, items...) 
+ return b +} + +func (b *MgetService) Source() (interface{}, error) { + source := make(map[string]interface{}) + items := make([]interface{}, len(b.items)) + for i, item := range b.items { + src, err := item.Source() + if err != nil { + return nil, err + } + items[i] = src + } + source["docs"] = items + return source, nil +} + +func (b *MgetService) Do() (*MgetResponse, error) { + // Build url + path := "/_mget" + + params := make(url.Values) + if b.realtime != nil { + params.Add("realtime", fmt.Sprintf("%v", *b.realtime)) + } + if b.preference != "" { + params.Add("preference", b.preference) + } + if b.refresh != nil { + params.Add("refresh", fmt.Sprintf("%v", *b.refresh)) + } + + // Set body + body, err := b.Source() + if err != nil { + return nil, err + } + + // Get response + res, err := b.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MgetResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Multi Get Item -- + +// MultiGetItem is a single document to retrieve via the MgetService. +type MultiGetItem struct { + index string + typ string + id string + routing string + fields []string + version *int64 // see org.elasticsearch.common.lucene.uid.Versions + versionType string // see org.elasticsearch.index.VersionType + fsc *FetchSourceContext +} + +func NewMultiGetItem() *MultiGetItem { + return &MultiGetItem{} +} + +func (item *MultiGetItem) Index(index string) *MultiGetItem { + item.index = index + return item +} + +func (item *MultiGetItem) Type(typ string) *MultiGetItem { + item.typ = typ + return item +} + +func (item *MultiGetItem) Id(id string) *MultiGetItem { + item.id = id + return item +} + +func (item *MultiGetItem) Routing(routing string) *MultiGetItem { + item.routing = routing + return item +} + +func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem { + if item.fields == nil { + item.fields = make([]string, 0) + } + item.fields = append(item.fields, fields...) + return item +} + +// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1), +// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions. +// The default in Elasticsearch is MatchAny (-3). +func (item *MultiGetItem) Version(version int64) *MultiGetItem { + item.version = &version + return item +} + +// VersionType can be "internal", "external", "external_gt", "external_gte", +// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source. +// It is "internal" by default. +func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem { + item.versionType = versionType + return item +} + +func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem { + item.fsc = fetchSourceContext + return item +} + +// Source returns the serialized JSON to be sent to Elasticsearch as +// part of a MultiGet search. 
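+//
+// For example, an item with index, type and id set serializes to a
+// shape like this sketch (names illustrative):
+//
+//	{"_id":"1","_index":"twitter","_type":"tweet"}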
+func (item *MultiGetItem) Source() (interface{}, error) { + source := make(map[string]interface{}) + + source["_id"] = item.id + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.fields != nil { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.version != nil { + source["version"] = fmt.Sprintf("%d", *item.version) + } + if item.versionType != "" { + source["version_type"] = item.versionType + } + + return source, nil +} + +// -- Result of a Multi Get request. + +type MgetResponse struct { + Docs []*GetResult `json:"docs,omitempty"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/mget_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/mget_test.go new file mode 100644 index 000000000..da78e3122 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/mget_test.go @@ -0,0 +1,95 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMultiGet(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add some documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Get documents 1 and 3 + res, err := client.MultiGet(). + Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")). + Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")). 
+ Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected result to be != nil; got nil") + } + if res.Docs == nil { + t.Fatal("expected result docs to be != nil; got nil") + } + if len(res.Docs) != 2 { + t.Fatalf("expected to have 2 docs; got %d", len(res.Docs)) + } + + item := res.Docs[0] + if item.Error != nil { + t.Errorf("expected no error on item 0; got %v", item.Error) + } + if item.Source == nil { + t.Errorf("expected Source != nil; got %v", item.Source) + } + var doc tweet + if err := json.Unmarshal(*item.Source, &doc); err != nil { + t.Fatalf("expected to unmarshal item Source; got %v", err) + } + if doc.Message != tweet1.Message { + t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message) + } + + item = res.Docs[1] + if item.Error != nil { + t.Errorf("expected no error on item 1; got %v", item.Error) + } + if item.Source == nil { + t.Errorf("expected Source != nil; got %v", item.Source) + } + if err := json.Unmarshal(*item.Source, &doc); err != nil { + t.Fatalf("expected to unmarshal item Source; got %v", err) + } + if doc.Message != tweet3.Message { + t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/msearch.go b/services/templeton/vendor/src/github.com/olivere/elastic/msearch.go new file mode 100644 index 000000000..2eb2b550e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/msearch.go @@ -0,0 +1,96 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" +) + +// MultiSearch executes one or more searches in one roundtrip. +// See http://www.elasticsearch.org/guide/reference/api/multi-search/ +type MultiSearchService struct { + client *Client + requests []*SearchRequest + indices []string + pretty bool + routing string + preference string +} + +func NewMultiSearchService(client *Client) *MultiSearchService { + builder := &MultiSearchService{ + client: client, + requests: make([]*SearchRequest, 0), + indices: make([]string, 0), + } + return builder +} + +func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService { + s.requests = append(s.requests, requests...) + return s +} + +func (s *MultiSearchService) Index(indices ...string) *MultiSearchService { + s.indices = append(s.indices, indices...) + return s +} + +func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { + s.pretty = pretty + return s +} + +func (s *MultiSearchService) Do() (*MultiSearchResult, error) { + // Build url + path := "/_msearch" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Set body + lines := make([]string, 0) + for _, sr := range s.requests { + // Set default indices if not specified in the request + if !sr.HasIndices() && len(s.indices) > 0 { + sr = sr.Index(s.indices...) 
+ } + + header, err := json.Marshal(sr.header()) + if err != nil { + return nil, err + } + body, err := json.Marshal(sr.body()) + if err != nil { + return nil, err + } + lines = append(lines, string(header)) + lines = append(lines, string(body)) + } + body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n + + // Get response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MultiSearchResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type MultiSearchResult struct { + Responses []*SearchResult `json:"responses,omitempty"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/msearch_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/msearch_test.go new file mode 100644 index 000000000..332ade2c6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/msearch_test.go @@ -0,0 +1,197 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + _ "net/http" + "testing" +) + +func TestMultiSearch(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Spawn two search queries with one roundtrip + q1 := NewMatchAllQuery() + q2 := NewTermQuery("tags", "golang") + + sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2). + Source(NewSearchSource().Query(q1).Size(10)) + sreq2 := NewSearchRequest().Index(testIndexName).Type("tweet"). + Source(NewSearchSource().Query(q2)) + + searchResult, err := client.MultiSearch(). + Add(sreq1, sreq2). 
+ Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Responses == nil { + t.Fatal("expected responses != nil; got nil") + } + if len(searchResult.Responses) != 2 { + t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses)) + } + + sres := searchResult.Responses[0] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } + + sres = searchResult.Responses[1] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 2 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 2 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMultiSearchWithOneRequest(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Spawn two search queries with one roundtrip + query := NewMatchAllQuery() + source := NewSearchSource().Query(query).Size(10) + sreq := NewSearchRequest().Source(source) + + searchResult, err := client.MultiSearch(). + Index(testIndexName). + Add(sreq). 
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Responses == nil {
+		t.Fatal("expected responses != nil; got nil")
+	}
+	if len(searchResult.Responses) != 1 {
+		t.Fatalf("expected 1 response; got %d", len(searchResult.Responses))
+	}
+
+	sres := searchResult.Responses[0]
+	if sres.Hits == nil {
+		t.Errorf("expected Hits != nil; got nil")
+	}
+	if sres.Hits.TotalHits != 3 {
+		t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
+	}
+	if len(sres.Hits.Hits) != 3 {
+		t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
+	}
+	for _, hit := range sres.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		item := make(map[string]interface{})
+		err := json.Unmarshal(*hit.Source, &item)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info.go b/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info.go
new file mode 100644
index 000000000..8a1c40fa9
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info.go
@@ -0,0 +1,318 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+	"time"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// NodesInfoService retrieves information about one or more (or all) of
+// the cluster nodes.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html.
+type NodesInfoService struct {
+	client       *Client
+	pretty       bool
+	nodeId       []string
+	metric       []string
+	flatSettings *bool
+	human        *bool
+}
+
+// NewNodesInfoService creates a new NodesInfoService.
+func NewNodesInfoService(client *Client) *NodesInfoService {
+	return &NodesInfoService{
+		client: client,
+		nodeId: []string{"_all"},
+		metric: []string{"_all"},
+	}
+}
+
+// NodeId is a list of node IDs or names to limit the returned information.
+// Use "_local" to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
+	s.nodeId = append(s.nodeId, nodeId...)
+	return s
+}
+
+// Metric is a list of metrics you wish returned. Leave empty to return all.
+// Valid metrics are: settings, os, process, jvm, thread_pool, network,
+// transport, http, and plugins.
+func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
+	s.metric = append(s.metric, metric...)
+	return s
+}
+
+// FlatSettings returns settings in flat format (default: false).
+func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesInfoService) Human(human bool) *NodesInfoService {
+	s.human = &human
+	return s
+}
+
+// Pretty indicates whether to indent the returned JSON.
+func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
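+//
+// With the defaults set in NewNodesInfoService this should expand to
+// "/_nodes/_all/_all".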
+func (s *NodesInfoService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
+		"node_id": strings.Join(s.nodeId, ","),
+		"metric":  strings.Join(s.metric, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.human != nil {
+		params.Set("human", fmt.Sprintf("%v", *s.human))
+	}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *NodesInfoService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(NodesInfoResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// NodesInfoResponse is the response of NodesInfoService.Do.
+type NodesInfoResponse struct {
+	ClusterName string                    `json:"cluster_name"`
+	Nodes       map[string]*NodesInfoNode `json:"nodes"`
+}
+
+type NodesInfoNode struct {
+	// Name of the node, e.g. "Mister Fear"
+	Name string `json:"name"`
+	// TransportAddress, e.g. "127.0.0.1:9300"
+	TransportAddress string `json:"transport_address"`
+	// Host is the host name, e.g. "macbookair"
+	Host string `json:"host"`
+	// IP is the IP address, e.g. "192.168.1.2"
+	IP string `json:"ip"`
+	// Version is the Elasticsearch version running on the node, e.g. "1.4.3"
+	Version string `json:"version"`
+	// Build is the Elasticsearch build, e.g. "36a29a7"
+	Build string `json:"build"`
+	// HTTPAddress, e.g. "127.0.0.1:9200"
+	HTTPAddress string `json:"http_address"`
+	// HTTPSAddress, e.g. "127.0.0.1:9200"
+	HTTPSAddress string `json:"https_address"`
+
+	// Attributes of the node.
+	Attributes map[string]interface{} `json:"attributes"`
+
+	// Settings of the node, e.g. paths and pidfile.
+	Settings map[string]interface{} `json:"settings"`
+
+	// OS information, e.g. CPU and memory.
+	OS *NodesInfoNodeOS `json:"os"`
+
+	// Process information, e.g. max file descriptors.
+	Process *NodesInfoNodeProcess `json:"process"`
+
+	// JVM information, e.g. VM version.
+	JVM *NodesInfoNodeJVM `json:"jvm"`
+
+	// ThreadPool information.
+	ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
+
+	// Network information.
+	Network *NodesInfoNodeNetwork `json:"network"`
+
+	// Transport information.
+	Transport *NodesInfoNodeTransport `json:"transport"`
+
+	// HTTP information.
+	HTTP *NodesInfoNodeHTTP `json:"http"`
+
+	// Plugins information.
+	Plugins []*NodesInfoNodePlugin `json:"plugins"`
+}
+
+type NodesInfoNodeOS struct {
+	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
+	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
+	AvailableProcessors     int    `json:"available_processors"`       // e.g. 4
+
+	// CPU information
+	CPU struct {
+		Vendor     string `json:"vendor"`      // e.g. Intel
+		Model      string `json:"model"`       // e.g. iMac15,1
+		MHz        int    `json:"mhz"`         // e.g. 3500
+		TotalCores int    `json:"total_cores"` // e.g. 
4 + TotalSockets int `json:"total_sockets"` // e.g. 4 + CoresPerSocket int `json:"cores_per_socket"` // e.g. 16 + CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256 + } `json:"cpu"` + + // Mem information + Mem struct { + Total string `json:"total"` // e.g. 16gb + TotalInBytes int `json:"total_in_bytes"` // e.g. 17179869184 + } `json:"mem"` + + // Swap information + Swap struct { + Total string `json:"total"` // e.g. 1gb + TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824 + } `json:"swap"` +} + +type NodesInfoNodeProcess struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + ID int `json:"id"` // process id, e.g. 87079 + MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768 + Mlockall bool `json:"mlockall"` // e.g. false +} + +type NodesInfoNodeJVM struct { + PID int `json:"pid"` // process id, e.g. 87079 + Version string `json:"version"` // e.g. "1.8.0_25" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. "25.25-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z" + StartTimeInMillis int64 `json:"start_time_in_millis"` + + // Mem information + Mem struct { + HeapInit string `json:"heap_init"` // e.g. 1gb + HeapInitInBytes int `json:"heap_init_in_bytes"` + HeapMax string `json:"heap_max"` // e.g. 4gb + HeapMaxInBytes int `json:"heap_max_in_bytes"` + NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb + NonHeapInitInBytes int `json:"non_heap_init_in_bytes"` + NonHeapMax string `json:"non_heap_max"` // e.g. 0b + NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"` + DirectMax string `json:"direct_max"` // e.g. 4gb + DirectMaxInBytes int `json:"direct_max_in_bytes"` + } `json:"mem"` + + GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"] + MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace"] +} + +type NodesInfoNodeThreadPool struct { + Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"` + Bench *NodesInfoNodeThreadPoolSection `json:"bench"` + Listener *NodesInfoNodeThreadPoolSection `json:"listener"` + Index *NodesInfoNodeThreadPoolSection `json:"index"` + Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"` + Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"` + Generic *NodesInfoNodeThreadPoolSection `json:"generic"` + Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"` + Search *NodesInfoNodeThreadPoolSection `json:"search"` + Flush *NodesInfoNodeThreadPoolSection `json:"flush"` + Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"` + Management *NodesInfoNodeThreadPoolSection `json:"management"` + Get *NodesInfoNodeThreadPoolSection `json:"get"` + Merge *NodesInfoNodeThreadPoolSection `json:"merge"` + Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"` + Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"` +} + +type NodesInfoNodeThreadPoolSection struct { + Type string `json:"type"` // e.g. fixed + Min int `json:"min"` // e.g. 4 + Max int `json:"max"` // e.g. 4 + KeepAlive string `json:"keep_alive"` // e.g. "5m" + QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1 +} + +type NodesInfoNodeNetwork struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 
1000 + PrimaryInterface struct { + Address string `json:"address"` // e.g. 192.168.1.2 + Name string `json:"name"` // e.g. en0 + MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66 + } `json:"primary_interface"` +} + +type NodesInfoNodeTransport struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` + Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"` +} + +type NodesInfoNodeTransportProfile struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` +} + +type NodesInfoNodeHTTP struct { + BoundAddress []string `json:"bound_address"` // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"] + PublishAddress string `json:"publish_address"` // e.g. "127.0.0.1:9300" + MaxContentLength string `json:"max_content_length"` // e.g. "100mb" + MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"` +} + +type NodesInfoNodePlugin struct { + Name string `json:"name"` + Description string `json:"description"` + Site bool `json:"site"` + JVM bool `json:"jvm"` + URL string `json:"url"` // e.g. /_plugin/dummy/ +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info_test.go new file mode 100644 index 000000000..0402b2706 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info_test.go @@ -0,0 +1,40 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestNodesInfo(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + + info, err := client.NodesInfo().Do() + if err != nil { + t.Fatal(err) + } + if info == nil { + t.Fatal("expected nodes info") + } + + if info.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", info.ClusterName) + } + if len(info.Nodes) == 0 { + t.Errorf("expected some nodes; got: %d", len(info.Nodes)) + } + for id, node := range info.Nodes { + if id == "" { + t.Errorf("expected node id; got: %q", id) + } + if node == nil { + t.Fatalf("expected node info; got: %v", node) + } + if node.IP == "" { + t.Errorf("expected node IP; got: %q", node.IP) + } + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/optimize.go b/services/templeton/vendor/src/github.com/olivere/elastic/optimize.go new file mode 100644 index 000000000..c9107f714 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/optimize.go @@ -0,0 +1,130 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +type OptimizeService struct { + client *Client + indices []string + maxNumSegments *int + onlyExpungeDeletes *bool + flush *bool + waitForMerge *bool + force *bool + pretty bool +} + +func NewOptimizeService(client *Client) *OptimizeService { + builder := &OptimizeService{ + client: client, + indices: make([]string, 0), + } + return builder +} + +func (s *OptimizeService) Index(indices ...string) *OptimizeService { + s.indices = append(s.indices, indices...) 
+ return s +} + +func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService { + s.maxNumSegments = &maxNumSegments + return s +} + +func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService { + s.onlyExpungeDeletes = &onlyExpungeDeletes + return s +} + +func (s *OptimizeService) Flush(flush bool) *OptimizeService { + s.flush = &flush + return s +} + +func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService { + s.waitForMerge = &waitForMerge + return s +} + +func (s *OptimizeService) Force(force bool) *OptimizeService { + s.force = &force + return s +} + +func (s *OptimizeService) Pretty(pretty bool) *OptimizeService { + s.pretty = pretty + return s +} + +func (s *OptimizeService) Do() (*OptimizeResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + path += "/_optimize" + + // Parameters + params := make(url.Values) + if s.maxNumSegments != nil { + params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments)) + } + if s.onlyExpungeDeletes != nil { + params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) + } + if s.flush != nil { + params.Set("flush", fmt.Sprintf("%v", *s.flush)) + } + if s.waitForMerge != nil { + params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge)) + } + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(OptimizeResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of an optimize request. + +type OptimizeResult struct { + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/optimize_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/optimize_test.go new file mode 100644 index 000000000..c47de3a94 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/optimize_test.go @@ -0,0 +1,47 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
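+
+// A typical optimize call, as exercised below, looks roughly like this
+// (index name and segment count are illustrative):
+//
+//	client.Optimize("twitter").MaxNumSegments(1).Do()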
+ +package elastic + +import ( + "testing" +) + +func TestOptimize(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add some documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Optimize documents + res, err := client.Optimize(testIndexName, testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected result; got nil") + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/percolate.go b/services/templeton/vendor/src/github.com/olivere/elastic/percolate.go new file mode 100644 index 000000000..a2bd14ba2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/percolate.go @@ -0,0 +1,309 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html. +type PercolateService struct { + client *Client + pretty bool + index string + typ string + id string + version interface{} + versionType string + routing []string + preference string + ignoreUnavailable *bool + percolateIndex string + percolatePreference string + percolateRouting string + source string + allowNoIndices *bool + expandWildcards string + percolateFormat string + percolateType string + bodyJson interface{} + bodyString string +} + +// NewPercolateService creates a new PercolateService. +func NewPercolateService(client *Client) *PercolateService { + return &PercolateService{ + client: client, + routing: make([]string, 0), + } +} + +// Index is the name of the index of the document being percolated. +func (s *PercolateService) Index(index string) *PercolateService { + s.index = index + return s +} + +// Type is the type of the document being percolated. +func (s *PercolateService) Type(typ string) *PercolateService { + s.typ = typ + return s +} + +// Id is to substitute the document in the request body with a +// document that is known by the specified id. On top of the id, +// the index and type parameter will be used to retrieve +// the document from within the cluster. +func (s *PercolateService) Id(id string) *PercolateService { + s.id = id + return s +} + +// ExpandWildcards indicates whether to expand wildcard expressions +// to concrete indices that are open, closed or both. +func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService { + s.expandWildcards = expandWildcards + return s +} + +// PercolateFormat indicates whether to return an array of matching +// query IDs instead of objects. 
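+// The value accepted by Elasticsearch here is presumably "ids", per its
+// percolate documentation.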
+func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService { + s.percolateFormat = percolateFormat + return s +} + +// PercolateType is the type to percolate document into. Defaults to type. +func (s *PercolateService) PercolateType(percolateType string) *PercolateService { + s.percolateType = percolateType + return s +} + +// PercolateRouting is the routing value to use when percolating +// the existing document. +func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService { + s.percolateRouting = percolateRouting + return s +} + +// Source is the URL-encoded request definition. +func (s *PercolateService) Source(source string) *PercolateService { + s.source = source + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService { + s.allowNoIndices = &allowNoIndices + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// PercolateIndex is the index to percolate the document into. Defaults to index. +func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService { + s.percolateIndex = percolateIndex + return s +} + +// PercolatePreference defines which shard to prefer when executing +// the percolate request. +func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService { + s.percolatePreference = percolatePreference + return s +} + +// Version is an explicit version number for concurrency control. +func (s *PercolateService) Version(version interface{}) *PercolateService { + s.version = version + return s +} + +// VersionType is the specific version type. +func (s *PercolateService) VersionType(versionType string) *PercolateService { + s.versionType = versionType + return s +} + +// Routing is a list of specific routing values. +func (s *PercolateService) Routing(routing []string) *PercolateService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *PercolateService) Preference(preference string) *PercolateService { + s.preference = preference + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *PercolateService) Pretty(pretty bool) *PercolateService { + s.pretty = pretty + return s +} + +// Doc wraps the given document into the "doc" key of the body. +func (s *PercolateService) Doc(doc interface{}) *PercolateService { + return s.BodyJson(map[string]interface{}{"doc": doc}) +} + +// BodyJson is the percolator request definition using the percolate DSL. +func (s *PercolateService) BodyJson(body interface{}) *PercolateService { + s.bodyJson = body + return s +} + +// BodyString is the percolator request definition using the percolate DSL. +func (s *PercolateService) BodyString(body string) *PercolateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
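+// For index "twitter", type "tweet", and id "1" the resulting path is
+// "/twitter/tweet/1/_percolate"; without an id it is
+// "/twitter/tweet/_percolate".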
+func (s *PercolateService) buildURL() (string, url.Values, error) { + // Build URL + var path string + var err error + if s.id == "" { + path, err = uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } else { + path, err = uritemplates.Expand("/{index}/{type}/{id}/_percolate", map[string]string{ + "index": s.index, + "type": s.typ, + "id": s.id, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.percolateIndex != "" { + params.Set("percolate_index", s.percolateIndex) + } + if s.percolatePreference != "" { + params.Set("percolate_preference", s.percolatePreference) + } + if s.percolateRouting != "" { + params.Set("percolate_routing", s.percolateRouting) + } + if s.source != "" { + params.Set("source", s.source) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.percolateFormat != "" { + params.Set("percolate_format", s.percolateFormat) + } + if s.percolateType != "" { + params.Set("percolate_type", s.percolateType) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *PercolateService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *PercolateService) Do() (*PercolateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PercolateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PercolateResponse is the response of PercolateService.Do. +type PercolateResponse struct { + TookInMillis int64 `json:"took"` // search time in milliseconds + Total int64 `json:"total"` // total matches + Matches []*PercolateMatch `json:"matches,omitempty"` + Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations +} + +// PercolateMatch returns a single match in a PercolateResponse. 
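+// In the JSON response, a match has the shape
+//
+// {"_index": "twitter", "_id": "1", "_score": 1.0}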
+type PercolateMatch struct {
+ Index string `json:"_index,omitempty"`
+ Id string `json:"_id"`
+ Score float64 `json:"_score,omitempty"`
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/percolate_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/percolate_test.go
new file mode 100644
index 000000000..07b36fef7
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/percolate_test.go
@@ -0,0 +1,92 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestPercolate(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Register a query in the ".percolator" type.
+ search := NewSearchSource().Query(NewMatchQuery("message", "Golang"))
+ searchSrc, err := search.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().
+ Index(testIndexName).Type(".percolator").Id("1").
+ BodyJson(searchSrc).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Percolate should return our registered query
+ newTweet := tweet{User: "olivere", Message: "Golang is fun."}
+ res, err := client.Percolate().
+ Index(testIndexName).Type("tweet").
+ Doc(newTweet). // shortcut for: BodyJson(map[string]interface{}{"doc": newTweet}).
+ Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected results != nil; got nil")
+ }
+ if res.Total != 1 {
+ t.Fatalf("expected 1 result; got: %d", res.Total)
+ }
+ if res.Matches == nil {
+ t.Fatalf("expected Matches; got: %v", res.Matches)
+ }
+ matches := res.Matches
+ if matches == nil {
+ t.Fatalf("expected matches as map; got: %v", matches)
+ }
+ if len(matches) != 1 {
+ t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
+ }
+ if matches[0].Id != "1" {
+ t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
+ }
+
+ // Percolating an existing document should return our registered query
+ res, err = client.Percolate().
+ Index(testIndexName).Type("tweet").
+ Id("1").
+ Pretty(true).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected results != nil; got nil")
+ }
+ if res.Total != 1 {
+ t.Fatalf("expected 1 result; got: %d", res.Total)
+ }
+ if res.Matches == nil {
+ t.Fatalf("expected Matches; got: %v", res.Matches)
+ }
+ matches = res.Matches
+ if matches == nil {
+ t.Fatalf("expected matches as map; got: %v", matches)
+ }
+ if len(matches) != 1 {
+ t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
+ }
+ if matches[0].Id != "1" {
+ t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/ping.go b/services/templeton/vendor/src/github.com/olivere/elastic/ping.go
new file mode 100644
index 000000000..fada22817
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/ping.go
@@ -0,0 +1,126 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+)
+
+// PingService checks if an Elasticsearch server on a given URL is alive.
+// When asked for, it can also return various information about the
+// Elasticsearch server, e.g. the Elasticsearch version number.
+//
+// Ping simply starts an HTTP GET request to the URL of the server.
+// If the server responds with HTTP Status code 200 OK, the server is alive.
+type PingService struct {
+ client *Client
+ url string
+ timeout string
+ httpHeadOnly bool
+ pretty bool
+}
+
+// PingResult is the result returned from querying the Elasticsearch server.
+type PingResult struct {
+ Name string `json:"name"`
+ ClusterName string `json:"cluster_name"`
+ Version struct {
+ Number string `json:"number"`
+ BuildHash string `json:"build_hash"`
+ BuildTimestamp string `json:"build_timestamp"`
+ BuildSnapshot bool `json:"build_snapshot"`
+ LuceneVersion string `json:"lucene_version"`
+ } `json:"version"`
+ TagLine string `json:"tagline"`
+}
+
+func NewPingService(client *Client) *PingService {
+ return &PingService{
+ client: client,
+ url: DefaultURL,
+ httpHeadOnly: false,
+ pretty: false,
+ }
+}
+
+func (s *PingService) URL(url string) *PingService {
+ s.url = url
+ return s
+}
+
+func (s *PingService) Timeout(timeout string) *PingService {
+ s.timeout = timeout
+ return s
+}
+
+// HttpHeadOnly makes the service only return the HTTP status code in Do;
+// the PingResult will be nil.
+func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
+ s.httpHeadOnly = httpHeadOnly
+ return s
+}
+
+func (s *PingService) Pretty(pretty bool) *PingService {
+ s.pretty = pretty
+ return s
+}
+
+// Do returns the PingResult, the HTTP status code of the Elasticsearch
+// server, and an error.
+func (s *PingService) Do() (*PingResult, int, error) {
+ s.client.mu.RLock()
+ basicAuth := s.client.basicAuth
+ basicAuthUsername := s.client.basicAuthUsername
+ basicAuthPassword := s.client.basicAuthPassword
+ s.client.mu.RUnlock()
+
+ url_ := s.url + "/"
+
+ params := make(url.Values)
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if len(params) > 0 {
+ url_ += "?" + params.Encode()
+ }
+
+ var method string
+ if s.httpHeadOnly {
+ method = "HEAD"
+ } else {
+ method = "GET"
+ }
+
+ // Notice: This service must NOT use PerformRequest!
+ req, err := NewRequest(method, url_)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if basicAuth {
+ req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+ }
+
+ res, err := s.client.c.Do((*http.Request)(req))
+ if err != nil {
+ return nil, 0, err
+ }
+ defer res.Body.Close()
+
+ var ret *PingResult
+ if !s.httpHeadOnly {
+ ret = new(PingResult)
+ if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+ return nil, res.StatusCode, err
+ }
+ }
+
+ return ret, res.StatusCode, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/ping_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/ping_test.go
new file mode 100644
index 000000000..9891c2025
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/ping_test.go
@@ -0,0 +1,64 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "net/http"
+ "testing"
+)
+
+func TestPingGet(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ res, code, err := client.Ping(DefaultURL).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if code != http.StatusOK {
+ t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+ }
+ if res == nil {
+ t.Fatalf("expected to return result, got: %v", res)
+ }
+ if res.Name == "" {
+ t.Errorf("expected Name != \"\"; got %q", res.Name)
+ }
+ if res.Version.Number == "" {
+ t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number)
+ }
+}
+
+func TestPingHead(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ res, code, err := client.Ping(DefaultURL).HttpHeadOnly(true).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if code != http.StatusOK {
+ t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+ }
+ if res != nil {
+ t.Errorf("expected not to return result, got: %v", res)
+ }
+}
+
+func TestPingHeadFailure(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ res, code, err := client.
+ Ping("http://127.0.0.1:9299").
+ HttpHeadOnly(true).
+ Do()
+ if err == nil {
+ t.Error("expected error, got nil")
+ }
+ if code == http.StatusOK {
+ t.Errorf("expected status code != %d; got %d", http.StatusOK, code)
+ }
+ if res != nil {
+ t.Errorf("expected not to return result, got: %v", res)
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/plugins.go b/services/templeton/vendor/src/github.com/olivere/elastic/plugins.go
new file mode 100644
index 000000000..3906d74d7
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/plugins.go
@@ -0,0 +1,38 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HasPlugin indicates whether the cluster has the named plugin.
+func (c *Client) HasPlugin(name string) (bool, error) {
+ plugins, err := c.Plugins()
+ if err != nil {
+ return false, err
+ }
+ for _, plugin := range plugins {
+ if plugin == name {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Plugins returns the list of all registered plugins.
+func (c *Client) Plugins() ([]string, error) {
+ stats, err := c.ClusterStats().Do()
+ if err != nil {
+ return nil, err
+ }
+ if stats == nil {
+ return nil, nil
+ }
+ if stats.Nodes == nil {
+ return nil, nil
+ }
+ var plugins []string
+ for _, plugin := range stats.Nodes.Plugins {
+ plugins = append(plugins, plugin.Name)
+ }
+ return plugins, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/plugins_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/plugins_test.go
new file mode 100644
index 000000000..112b80943
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/plugins_test.go
@@ -0,0 +1,32 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestClientPlugins(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Plugins()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestClientHasPlugin(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ found, err := client.HasPlugin("no-such-plugin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if found {
+ t.Fatalf("expected to not find plugin %q", "no-such-plugin")
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/query.go b/services/templeton/vendor/src/github.com/olivere/elastic/query.go
new file mode 100644
index 000000000..0869eaecc
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/query.go
@@ -0,0 +1,13 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Query represents the generic query interface. A query's sole purpose
+// is to return the source of the query as a JSON-serializable object.
+// Returning map[string]interface{} is the norm for queries.
+type Query interface {
+ // Source returns the JSON-serializable query request.
+ Source() (interface{}, error)
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/reindexer.go b/services/templeton/vendor/src/github.com/olivere/elastic/reindexer.go
new file mode 100644
index 000000000..7193a1337
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/reindexer.go
@@ -0,0 +1,270 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+// Reindexer simplifies the process of reindexing an index. You typically
+// reindex a source index to a target index. However, you can also specify
+// a query that filters out documents from the source index before bulk
+// indexing them into the target index. The caller may also specify a
+// different client for the target, e.g. when copying indices from one
+// Elasticsearch cluster to another.
+//
+// Internally, the Reindexer uses a scan and scroll operation on the source
+// index and bulk indexing to push data into the target index.
+//
+// By default, the reindexer fetches the _source, _parent, and _routing
+// attributes from the source index; the ReindexerFunc provided by
+// CopyToTargetIndex copies those attributes into the destination index.
+// This behaviour can be overridden by setting ScanFields and providing a
+// custom ReindexerFunc.
+//
+// The caller is responsible for setting up and/or clearing the target index
+// before starting the reindex process.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
type Reindexer struct {
+ sourceClient, targetClient *Client
+ sourceIndex string
+ query Query
+ scanFields []string
+ bulkSize int
+ size int
+ scroll string
+ reindexerFunc ReindexerFunc
+ progress ReindexerProgressFunc
+ statsOnly bool
+}
+
+// A ReindexerFunc receives each hit from the sourceIndex.
+// It can choose to add any number of BulkableRequests to the bulkService.
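+//
+// A minimal custom ReindexerFunc might look like this (a sketch; the
+// target index name "target" is illustrative):
+//
+// func copyRaw(hit *SearchHit, svc *BulkService) error {
+// var src map[string]interface{}
+// if err := json.Unmarshal(*hit.Source, &src); err != nil {
+// return err
+// }
+// svc.Add(NewBulkIndexRequest().Index("target").Type(hit.Type).Id(hit.Id).Doc(src))
+// return nil
+// }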
+type ReindexerFunc func(hit *SearchHit, bulkService *BulkService) error + +// CopyToTargetIndex returns a ReindexerFunc that copies the SearchHit's +// _source, _parent, and _routing attributes into the targetIndex +func CopyToTargetIndex(targetIndex string) ReindexerFunc { + return func(hit *SearchHit, bulkService *BulkService) error { + // TODO(oe) Do we need to deserialize here? + source := make(map[string]interface{}) + if err := json.Unmarshal(*hit.Source, &source); err != nil { + return err + } + req := NewBulkIndexRequest().Index(targetIndex).Type(hit.Type).Id(hit.Id).Doc(source) + if hit.Parent != "" { + req = req.Parent(hit.Parent) + } + if hit.Routing != "" { + req = req.Routing(hit.Routing) + } + bulkService.Add(req) + return nil + } +} + +// ReindexerProgressFunc is a callback that can be used with Reindexer +// to report progress while reindexing data. +type ReindexerProgressFunc func(current, total int64) + +// ReindexerResponse is returned from the Do func in a Reindexer. +// By default, it returns the number of succeeded and failed bulk operations. +// To return details about all failed items, set StatsOnly to false in +// Reindexer. +type ReindexerResponse struct { + Success int64 + Failed int64 + Errors []*BulkResponseItem +} + +// NewReindexer returns a new Reindexer. +func NewReindexer(client *Client, source string, reindexerFunc ReindexerFunc) *Reindexer { + return &Reindexer{ + sourceClient: client, + sourceIndex: source, + reindexerFunc: reindexerFunc, + statsOnly: true, + } +} + +// TargetClient specifies a different client for the target. This is +// necessary when the target index is in a different Elasticsearch cluster. +// By default, the source and target clients are the same. +func (ix *Reindexer) TargetClient(c *Client) *Reindexer { + ix.targetClient = c + return ix +} + +// Query specifies the query to apply to the source. It filters out those +// documents to be indexed into target. A nil query does not filter out any +// documents. +func (ix *Reindexer) Query(q Query) *Reindexer { + ix.query = q + return ix +} + +// ScanFields specifies the fields the scan query should load. +// The default fields are _source, _parent, _routing. +func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer { + ix.scanFields = scanFields + return ix +} + +// BulkSize returns the number of documents to send to Elasticsearch per chunk. +// The default is 500. +func (ix *Reindexer) BulkSize(bulkSize int) *Reindexer { + ix.bulkSize = bulkSize + return ix +} + +// Size is the number of results to return per shard, not per request. +// So a size of 10 which hits 5 shards will return a maximum of 50 results +// per scan request. +func (ix *Reindexer) Size(size int) *Reindexer { + ix.size = size + return ix +} + +// Scroll specifies for how long the scroll operation on the source index +// should be maintained. The default is 5m. +func (ix *Reindexer) Scroll(timeout string) *Reindexer { + ix.scroll = timeout + return ix +} + +// Progress indicates a callback that will be called while indexing. +func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer { + ix.progress = f + return ix +} + +// StatsOnly indicates whether the Do method should return details e.g. about +// the documents that failed while indexing. It is true by default, i.e. only +// the number of documents that succeeded/failed are returned. Set to false +// if you want all the details. 
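+//
+// For example (a sketch; index names are illustrative):
+//
+// resp, err := NewReindexer(client, "source", CopyToTargetIndex("target")).
+// StatsOnly(false).
+// Do()
+// // resp.Errors then carries one *BulkResponseItem per failed document.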
+func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer {
+ ix.statsOnly = statsOnly
+ return ix
+}
+
+// Do starts the reindexing process.
+func (ix *Reindexer) Do() (*ReindexerResponse, error) {
+ if ix.sourceClient == nil {
+ return nil, errors.New("no source client")
+ }
+ if ix.sourceIndex == "" {
+ return nil, errors.New("no source index")
+ }
+ if ix.targetClient == nil {
+ ix.targetClient = ix.sourceClient
+ }
+ if ix.scanFields == nil {
+ ix.scanFields = []string{"_source", "_parent", "_routing"}
+ }
+ if ix.bulkSize <= 0 {
+ ix.bulkSize = 500
+ }
+ if ix.scroll == "" {
+ ix.scroll = "5m"
+ }
+
+ // Count total to report progress (if necessary)
+ var err error
+ var current, total int64
+ if ix.progress != nil {
+ total, err = ix.count()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Prepare scan and scroll to iterate through the source index
+ scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll).Fields(ix.scanFields...)
+ if ix.query != nil {
+ scanner = scanner.Query(ix.query)
+ }
+ if ix.size > 0 {
+ scanner = scanner.Size(ix.size)
+ }
+ cursor, err := scanner.Do()
+ if err != nil {
+ return nil, err
+ }
+
+ bulk := ix.targetClient.Bulk()
+
+ ret := &ReindexerResponse{
+ Errors: make([]*BulkResponseItem, 0),
+ }
+
+ // Main loop iterates through the source index and bulk indexes into target.
+ for {
+ docs, err := cursor.Next()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ return ret, err
+ }
+
+ if docs.TotalHits() > 0 {
+ for _, hit := range docs.Hits.Hits {
+ if ix.progress != nil {
+ current++
+ ix.progress(current, total)
+ }
+
+ err := ix.reindexerFunc(hit, bulk)
+ if err != nil {
+ return ret, err
+ }
+
+ if bulk.NumberOfActions() >= ix.bulkSize {
+ bulk, err = ix.commit(bulk, ret)
+ if err != nil {
+ return ret, err
+ }
+ }
+ }
+ }
+ }
+
+ // Final flush
+ if bulk.NumberOfActions() > 0 {
+ if _, err = ix.commit(bulk, ret); err != nil {
+ return ret, err
+ }
+ }
+
+ return ret, nil
+}
+
+// count returns the number of documents in the source index.
+// The query is taken into account, if specified.
+func (ix *Reindexer) count() (int64, error) {
+ service := ix.sourceClient.Count(ix.sourceIndex)
+ if ix.query != nil {
+ service = service.Query(ix.query)
+ }
+ return service.Do()
+}
+
+// commit commits a bulk, updates the stats, and returns a fresh bulk service.
+func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) {
+ bres, err := bulk.Do()
+ if err != nil {
+ return nil, err
+ }
+ ret.Success += int64(len(bres.Succeeded()))
+ failed := bres.Failed()
+ ret.Failed += int64(len(failed))
+ if !ix.statsOnly {
+ ret.Errors = append(ret.Errors, failed...)
+ } + bulk = ix.targetClient.Bulk() + return bulk, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/reindexer_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/reindexer_test.go new file mode 100644 index 000000000..a21dff5c5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/reindexer_test.go @@ -0,0 +1,285 @@ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestReindexer(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + sourceCount, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + targetCount, err := client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != 0 { + t.Fatalf("expected %d documents; got: %d", 0, targetCount) + } + + r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + if _, err := client.Flush().Index(testIndexName2).Do(); err != nil { + t.Fatal(err) + } + + targetCount, err = client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != sourceCount { + t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) + } +} + +func TestReindexerWithQuery(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + q := NewTermQuery("user", "olivere") + + sourceCount, err := client.Count(testIndexName).Query(q).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + targetCount, err := client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != 0 { + t.Fatalf("expected %d documents; got: %d", 0, targetCount) + } + + r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) + r = r.Query(q) + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + if _, err := client.Flush().Index(testIndexName2).Do(); err != nil { + t.Fatal(err) + } + + targetCount, err = client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != sourceCount { + t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) + } +} + +func TestReindexerProgress(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + sourceCount, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + var calls int64 + totalsOk := true + progress := func(current, total int64) { + calls += 1 + totalsOk = totalsOk && total == sourceCount + } + + r := 
NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) + r = r.Progress(progress) + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + if calls != sourceCount { + t.Errorf("expected progress to be called %d times; got: %d", sourceCount, calls) + } + if !totalsOk { + t.Errorf("expected totals in progress to be %d", sourceCount) + } +} + +func TestReindexerWithTargetClient(t *testing.T) { + sourceClient := setupTestClientAndCreateIndexAndAddDocs(t) + targetClient, err := NewClient() + if err != nil { + t.Fatal(err) + } + + sourceCount, err := sourceClient.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + targetCount, err := targetClient.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != 0 { + t.Fatalf("expected %d documents; got: %d", 0, targetCount) + } + + r := NewReindexer(sourceClient, testIndexName, CopyToTargetIndex(testIndexName2)) + r = r.TargetClient(targetClient) + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + if _, err := targetClient.Flush().Index(testIndexName2).Do(); err != nil { + t.Fatal(err) + } + + targetCount, err = targetClient.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != sourceCount { + t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) + } +} + +// TestReindexerPreservingTTL shows how a caller can take control of the +// copying process by providing ScanFields and a custom ReindexerFunc. 
+func TestReindexerPreservingTTL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").TTL("999999s").Version(10).VersionType("external").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sourceCount, err := client.Count(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if sourceCount <= 0 {
+ t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+ }
+
+ targetCount, err := client.Count(testIndexName2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if targetCount != 0 {
+ t.Fatalf("expected %d documents; got: %d", 0, targetCount)
+ }
+
+ // Carries over the source item's ttl to the reindexed item
+ copyWithTTL := func(hit *SearchHit, bulkService *BulkService) error {
+ source := make(map[string]interface{})
+ if err := json.Unmarshal(*hit.Source, &source); err != nil {
+ return err
+ }
+ req := NewBulkIndexRequest().Index(testIndexName2).Type(hit.Type).Id(hit.Id).Doc(source)
+ if hit.TTL > 0 {
+ req = req.Ttl(hit.TTL)
+ }
+ bulkService.Add(req)
+ return nil
+ }
+
+ r := NewReindexer(client, testIndexName, copyWithTTL).ScanFields("_source", "_ttl")
+
+ ret, err := r.Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ret == nil {
+ t.Fatalf("expected result != %v; got: %v", nil, ret)
+ }
+ if ret.Success != sourceCount {
+ t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success)
+ }
+ if ret.Failed != 0 {
+ t.Errorf("expected failed = %d; got: %d", 0, ret.Failed)
+ }
+ if len(ret.Errors) != 0 {
+ t.Errorf("expected to return no errors by default; got: %v", ret.Errors)
+ }
+
+ getResult, err := client.Get().Index(testIndexName2).Id("1").Fields("_source", "_ttl").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if getResult.TTL <= 0 {
+ t.Errorf("expected TTL field in reindexed document")
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/request.go b/services/templeton/vendor/src/github.com/olivere/elastic/request.go
new file mode 100644
index 000000000..1347e1b6f
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/request.go
@@ -0,0 +1,123 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "runtime"
+ "strings"
+)
+
+// Request is an Elasticsearch-specific HTTP request.
+type Request http.Request
+
+// NewRequest creates a new Request and presets common headers such as User-Agent and Accept.
+func NewRequest(method, url string) (*Request, error) {
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
+ req.Header.Add("Accept", "application/json")
+ return (*Request)(req), nil
+}
+
+// SetBasicAuth wraps http.Request's SetBasicAuth.
+func (r *Request) SetBasicAuth(username, password string) {
+ ((*http.Request)(r)).SetBasicAuth(username, password)
+}
+
+// SetBody encodes the body in the request. Optionally, it performs GZIP compression.
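+// A sketch of setting a plain (uncompressed) JSON body (the URL and
+// document are illustrative):
+//
+// req, _ := NewRequest("POST", "http://127.0.0.1:9200/twitter/tweet/1")
+// err := req.SetBody(map[string]interface{}{"user": "olivere"}, false)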
+func (r *Request) SetBody(body interface{}, gzipCompress bool) error {
+ switch b := body.(type) {
+ case string:
+ if gzipCompress {
+ return r.setBodyGzip(b)
+ } else {
+ return r.setBodyString(b)
+ }
+ default:
+ if gzipCompress {
+ return r.setBodyGzip(body)
+ } else {
+ return r.setBodyJson(body)
+ }
+ }
+}
+
+// setBodyJson marshals the body via json.Marshal and sets the JSON content type.
+func (r *Request) setBodyJson(data interface{}) error {
+ body, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ r.Header.Set("Content-Type", "application/json")
+ r.setBodyReader(bytes.NewReader(body))
+ return nil
+}
+
+// setBodyString encodes the body as a string.
+func (r *Request) setBodyString(body string) error {
+ return r.setBodyReader(strings.NewReader(body))
+}
+
+// setBodyGzip gzips the body. It accepts both strings and structs as body.
+// The latter will be encoded via json.Marshal.
+func (r *Request) setBodyGzip(body interface{}) error {
+ switch b := body.(type) {
+ case string:
+ buf := new(bytes.Buffer)
+ w := gzip.NewWriter(buf)
+ if _, err := w.Write([]byte(b)); err != nil {
+ return err
+ }
+ if err := w.Close(); err != nil {
+ return err
+ }
+ r.Header.Add("Content-Encoding", "gzip")
+ r.Header.Add("Vary", "Accept-Encoding")
+ return r.setBodyReader(bytes.NewReader(buf.Bytes()))
+ default:
+ data, err := json.Marshal(b)
+ if err != nil {
+ return err
+ }
+ buf := new(bytes.Buffer)
+ w := gzip.NewWriter(buf)
+ if _, err := w.Write(data); err != nil {
+ return err
+ }
+ if err := w.Close(); err != nil {
+ return err
+ }
+ r.Header.Add("Content-Encoding", "gzip")
+ r.Header.Add("Vary", "Accept-Encoding")
+ r.Header.Set("Content-Type", "application/json")
+ return r.setBodyReader(bytes.NewReader(buf.Bytes()))
+ }
+}
+
+// setBodyReader writes the body from an io.Reader.
+func (r *Request) setBodyReader(body io.Reader) error {
+ rc, ok := body.(io.ReadCloser)
+ if !ok && body != nil {
+ rc = ioutil.NopCloser(body)
+ }
+ r.Body = rc
+ if body != nil {
+ switch v := body.(type) {
+ case *strings.Reader:
+ r.ContentLength = int64(v.Len())
+ case *bytes.Buffer:
+ r.ContentLength = int64(v.Len())
+ }
+ }
+ return nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/rescore.go b/services/templeton/vendor/src/github.com/olivere/elastic/rescore.go
new file mode 100644
index 000000000..0cbc06710
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/rescore.go
@@ -0,0 +1,44 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
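+
+// Rescoring re-ranks the top hits of a search with a second, usually more
+// expensive query. A sketch of building one (the query q is assumed to be
+// defined elsewhere):
+//
+// rescore := NewRescore().
+// WindowSize(50).
+// Rescorer(NewQueryRescorer(q).QueryWeight(0.7).RescoreQueryWeight(1.2))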
+ +package elastic + +type Rescore struct { + rescorer Rescorer + windowSize *int + defaultRescoreWindowSize *int +} + +func NewRescore() *Rescore { + return &Rescore{} +} + +func (r *Rescore) WindowSize(windowSize int) *Rescore { + r.windowSize = &windowSize + return r +} + +func (r *Rescore) IsEmpty() bool { + return r.rescorer == nil +} + +func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore { + r.rescorer = rescorer + return r +} + +func (r *Rescore) Source() (interface{}, error) { + source := make(map[string]interface{}) + if r.windowSize != nil { + source["window_size"] = *r.windowSize + } else if r.defaultRescoreWindowSize != nil { + source["window_size"] = *r.defaultRescoreWindowSize + } + rescorerSrc, err := r.rescorer.Source() + if err != nil { + return nil, err + } + source[r.rescorer.Name()] = rescorerSrc + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/rescorer.go b/services/templeton/vendor/src/github.com/olivere/elastic/rescorer.go new file mode 100644 index 000000000..28ad59cbb --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/rescorer.go @@ -0,0 +1,64 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +type Rescorer interface { + Name() string + Source() (interface{}, error) +} + +// -- Query Rescorer -- + +type QueryRescorer struct { + query Query + rescoreQueryWeight *float64 + queryWeight *float64 + scoreMode string +} + +func NewQueryRescorer(query Query) *QueryRescorer { + return &QueryRescorer{ + query: query, + } +} + +func (r *QueryRescorer) Name() string { + return "query" +} + +func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer { + r.rescoreQueryWeight = &rescoreQueryWeight + return r +} + +func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer { + r.queryWeight = &queryWeight + return r +} + +func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer { + r.scoreMode = scoreMode + return r +} + +func (r *QueryRescorer) Source() (interface{}, error) { + rescoreQuery, err := r.query.Source() + if err != nil { + return nil, err + } + + source := make(map[string]interface{}) + source["rescore_query"] = rescoreQuery + if r.queryWeight != nil { + source["query_weight"] = *r.queryWeight + } + if r.rescoreQueryWeight != nil { + source["rescore_query_weight"] = *r.rescoreQueryWeight + } + if r.scoreMode != "" { + source["score_mode"] = r.scoreMode + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/response.go b/services/templeton/vendor/src/github.com/olivere/elastic/response.go new file mode 100644 index 000000000..9426c23af --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/response.go @@ -0,0 +1,43 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "io/ioutil" + "net/http" +) + +// Response represents a response from Elasticsearch. +type Response struct { + // StatusCode is the HTTP status code, e.g. 200. + StatusCode int + // Header is the HTTP header from the HTTP response. + // Keys in the map are canonicalized (see http.CanonicalHeaderKey). + Header http.Header + // Body is the deserialized response body. 
+ Body json.RawMessage
+}
+
+// newResponse creates a new response from the HTTP response.
+func (c *Client) newResponse(res *http.Response) (*Response, error) {
+ r := &Response{
+ StatusCode: res.StatusCode,
+ Header: res.Header,
+ }
+ if res.Body != nil {
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ // HEAD requests return a body but no content
+ if len(slurp) > 0 {
+ if err := c.decoder.Decode(slurp, &r.Body); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return r, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/scan.go b/services/templeton/vendor/src/github.com/olivere/elastic/scan.go
new file mode 100644
index 000000000..08822531b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/scan.go
@@ -0,0 +1,359 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+const (
+ defaultKeepAlive = "5m"
+)
+
+var (
+ // End of stream (or scan)
+ EOS = errors.New("EOS")
+
+ // No ScrollId
+ ErrNoScrollId = errors.New("no scrollId")
+)
+
+// ScanService manages a cursor through documents in Elasticsearch.
+type ScanService struct {
+ client *Client
+ indices []string
+ types []string
+ keepAlive string
+ searchSource *SearchSource
+ pretty bool
+ routing string
+ preference string
+ size *int
+}
+
+// NewScanService creates a new service to iterate through the results
+// of a query.
+func NewScanService(client *Client) *ScanService {
+ builder := &ScanService{
+ client: client,
+ searchSource: NewSearchSource().Query(NewMatchAllQuery()),
+ }
+ return builder
+}
+
+// Index sets the name(s) of the indices to use for the scan.
+func (s *ScanService) Index(indices ...string) *ScanService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Type restricts the scan to a list of types.
+func (s *ScanService) Type(types ...string) *ScanService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScanService) Scroll(keepAlive string) *ScanService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// KeepAlive sets the maximum time the cursor will be
+// available before expiration (e.g. "5m" for 5 minutes).
+func (s *ScanService) KeepAlive(keepAlive string) *ScanService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// Fields tells Elasticsearch to only load specific fields from a search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html.
+func (s *ScanService) Fields(fields ...string) *ScanService {
+ s.searchSource = s.searchSource.Fields(fields...)
+ return s
+}
+
+// SearchSource sets the search source builder to use with this service.
+func (s *ScanService) SearchSource(searchSource *SearchSource) *ScanService {
+ s.searchSource = searchSource
+ if s.searchSource == nil {
+ s.searchSource = NewSearchSource().Query(NewMatchAllQuery())
+ }
+ return s
+}
+
+// Routing allows for a (comma-separated) list of specific routing values.
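+// For example, Routing("user1", "user2") sends routing=user1,user2.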
+func (s *ScanService) Routing(routings ...string) *ScanService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: "random"). +func (s *ScanService) Preference(preference string) *ScanService { + s.preference = preference + return s +} + +// Query sets the query to perform, e.g. MatchAllQuery. +func (s *ScanService) Query(query Query) *ScanService { + s.searchSource = s.searchSource.Query(query) + return s +} + +// PostFilter is executed as the last filter. It only affects the +// search hits but not facets. See +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html +// for details. +func (s *ScanService) PostFilter(postFilter Query) *ScanService { + s.searchSource = s.searchSource.PostFilter(postFilter) + return s +} + +// FetchSource indicates whether the response should contain the stored +// _source for every hit. +func (s *ScanService) FetchSource(fetchSource bool) *ScanService { + s.searchSource = s.searchSource.FetchSource(fetchSource) + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *ScanService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScanService { + s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) + return s +} + +// Version can be set to true to return a version for each search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html. +func (s *ScanService) Version(version bool) *ScanService { + s.searchSource = s.searchSource.Version(version) + return s +} + +// Sort the results by the given field, in the given order. +// Use the alternative SortWithInfo to use a struct to define the sorting. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html +// for detailed documentation of sorting. +func (s *ScanService) Sort(field string, ascending bool) *ScanService { + s.searchSource = s.searchSource.Sort(field, ascending) + return s +} + +// SortWithInfo defines how to sort results. +// Use the Sort func for a shortcut. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html +// for detailed documentation of sorting. +func (s *ScanService) SortWithInfo(info SortInfo) *ScanService { + s.searchSource = s.searchSource.SortWithInfo(info) + return s +} + +// SortBy defines how to sort results. +// Use the Sort func for a shortcut. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html +// for detailed documentation of sorting. +func (s *ScanService) SortBy(sorter ...Sorter) *ScanService { + s.searchSource = s.searchSource.SortBy(sorter...) + return s +} + +// Pretty enables the caller to indent the JSON output. +func (s *ScanService) Pretty(pretty bool) *ScanService { + s.pretty = pretty + return s +} + +// Size is the number of results to return per shard, not per request. +// So a size of 10 which hits 5 shards will return a maximum of 50 results +// per scan request. +func (s *ScanService) Size(size int) *ScanService { + s.size = &size + return s +} + +// Do executes the query and returns a "server-side cursor". 
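+// A typical sequence (a sketch; the index name is illustrative):
+//
+// cursor, err := client.Scan("twitter").Size(100).Do()
+// if err != nil {
+// // handle error
+// }
+// // iterate with cursor.Next() until it returns EOS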
+func (s *ScanService) Do() (*ScanCursor, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_search"
+
+ // Parameters
+ params := make(url.Values)
+ if !s.searchSource.hasSort() {
+ // TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated.
+ params.Set("search_type", "scan")
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.keepAlive != "" {
+ params.Set("scroll", s.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+ if s.size != nil && *s.size > 0 {
+ params.Set("size", fmt.Sprintf("%d", *s.size))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+
+ // Get response
+ body, err := s.searchSource.Source()
+ if err != nil {
+ return nil, err
+ }
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ searchResult := new(SearchResult)
+ if err := json.Unmarshal(res.Body, searchResult); err != nil {
+ return nil, err
+ }
+
+ cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult)
+
+ return cursor, nil
+}
+
+// ScanCursor represents a single page of results from
+// an Elasticsearch Scan operation.
+type ScanCursor struct {
+ Results *SearchResult
+
+ client *Client
+ keepAlive string
+ pretty bool
+ currentPage int
+}
+
+// NewScanCursor returns a new initialized instance
+// of ScanCursor.
+func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor {
+ return &ScanCursor{
+ client: client,
+ keepAlive: keepAlive,
+ pretty: pretty,
+ Results: searchResult,
+ }
+}
+
+// TotalHits is a convenience method that returns the number
+// of hits the cursor will iterate through.
+func (c *ScanCursor) TotalHits() int64 {
+ if c.Results.Hits == nil {
+ return 0
+ }
+ return c.Results.Hits.TotalHits
+}
+
+// Next returns the next search result. It returns EOS when all
+// documents have been scanned.
+// +// Usage: +// +// for { +// res, err := cursor.Next() +// if err == elastic.EOS { +// // End of stream (or scan) +// break +// } +// if err != nil { +// // Handle error +// } +// // Work with res +// } +// +func (c *ScanCursor) Next() (*SearchResult, error) { + if c.currentPage > 0 { + if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 { + return nil, EOS + } + } + if c.Results.ScrollId == "" { + return nil, EOS + } + + // Build url + path := "/_search/scroll" + + // Parameters + params := make(url.Values) + if c.pretty { + params.Set("pretty", fmt.Sprintf("%v", c.pretty)) + } + if c.keepAlive != "" { + params.Set("scroll", c.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + + // Set body + body := c.Results.ScrollId + + // Get response + res, err := c.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + c.Results = &SearchResult{ScrollId: body} + if err := json.Unmarshal(res.Body, c.Results); err != nil { + return nil, err + } + + c.currentPage += 1 + + return c.Results, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/scan_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/scan_test.go new file mode 100644 index 000000000..b2a8f0ef9 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/scan_test.go @@ -0,0 +1,559 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + _ "net/http" + "testing" +) + +func TestScan(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + cursor, err := client.Scan(testIndexName).Size(1).Do() + if err != nil { + t.Fatal(err) + } + + if cursor.Results == nil { + t.Errorf("expected results != nil; got nil") + } + if cursor.Results.Hits == nil { + t.Errorf("expected results.Hits != nil; got nil") + } + if cursor.Results.Hits.TotalHits != 3 { + t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) + } + if len(cursor.Results.Hits.Hits) != 0 { + t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) + } + + pages := 0 + numDocs := 0 + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + pages += 1 + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + numDocs 
+= 1 + } + } + + if pages <= 0 { + t.Errorf("expected to retrieve at least 1 page; got %d", pages) + } + + if numDocs != 3 { + t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs) + } +} + +func TestScanWithSort(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // We sort on a numerical field, because sorting on the 'message' string field would + // raise the whole question of tokenizing and analyzing. + cursor, err := client.Scan(testIndexName).Sort("retweets", true).Size(1).Do() + if err != nil { + t.Fatal(err) + } + + if cursor.Results == nil { + t.Errorf("expected results != nil; got nil") + } + if cursor.Results.Hits == nil { + t.Errorf("expected results.Hits != nil; got nil") + } + if cursor.Results.Hits.TotalHits != 3 { + t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) + } + if len(cursor.Results.Hits.Hits) != 1 { + t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 1, len(cursor.Results.Hits.Hits)) + } + + if cursor.Results.Hits.Hits[0].Id != "3" { + t.Errorf("expected hitID = %v; got %v", "3", cursor.Results.Hits.Hits[0].Id) + } + + numDocs := 1 // The cursor already gave us a result + pages := 0 + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + pages += 1 + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + numDocs += 1 + } + } + + if pages <= 0 { + t.Errorf("expected to retrieve at least 1 page; got %d", pages) + } + + if numDocs != 3 { + t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs) + } +} + +func TestScanWithSortByDoc(t *testing.T) { + // Sorting by doc is introduced in Elasticsearch 2.1, + // and replaces the deprecated search_type=scan. 
+	// See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated
+	client := setupTestClientAndCreateIndex(t)
+
+	esversion, err := client.ElasticsearchVersion(DefaultURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if esversion < "2.1" {
+		t.Skipf(`Elasticsearch %s does not have {"sort":["_doc"]}`, esversion)
+		return
+	}
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	comment1 := comment{User: "nico", Comment: "You bet."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	cursor, err := client.Scan(testIndexName).Sort("_doc", true).Size(1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	numDocs := 0
+	pages := 0
+
+	for {
+		searchResult, err := cursor.Next()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pages += 1
+
+		for range searchResult.Hits.Hits {
+			numDocs += 1
+		}
+	}
+
+	if pages != 3 {
+		t.Errorf("expected to retrieve %d pages; got %d", 3, pages)
+	}
+	if numDocs != 2 {
+		t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs)
+	}
+}
+
+func TestScanWithSearchSource(t *testing.T) {
+	//client := setupTestClientAndCreateIndexAndLog(t)
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	src := NewSearchSource().
+		Query(NewTermQuery("user", "olivere")).
+		FetchSourceContext(NewFetchSourceContext(true).Include("retweets"))
+	cursor, err := client.Scan(testIndexName).SearchSource(src).Size(1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if cursor.Results == nil {
+		t.Fatalf("expected results != nil; got nil")
+	}
+	if cursor.Results.Hits == nil {
+		t.Fatalf("expected results.Hits != nil; got nil")
+	}
+	if cursor.Results.Hits.TotalHits != 2 {
+		t.Fatalf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits)
+	}
+
+	numDocs := 0
+	pages := 0
+
+	for {
+		searchResult, err := cursor.Next()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pages += 1
+
+		for _, hit := range searchResult.Hits.Hits {
+			if hit.Index != testIndexName {
+				t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+			}
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if _, found := item["message"]; found {
+				t.Fatalf("expected to not see field %q; got: %#v", "message", item)
+			}
+			numDocs += 1
+		}
+	}
+
+	if pages != 3 {
+		t.Errorf("expected to retrieve %d pages; got %d", 3, pages)
+	}
+	if numDocs != 2 {
+		t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs)
+	}
+}
+
+func TestScanWithQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Return tweets from olivere only
+	termQuery := NewTermQuery("user", "olivere")
+	cursor, err := client.Scan(testIndexName).
+		Size(1).
+		Query(termQuery).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if cursor.Results == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if cursor.Results.Hits == nil {
+		t.Errorf("expected results.Hits != nil; got nil")
+	}
+	if cursor.Results.Hits.TotalHits != 2 {
+		t.Errorf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits)
+	}
+	if len(cursor.Results.Hits.Hits) != 0 {
+		t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits))
+	}
+
+	pages := 0
+	numDocs := 0
+
+	for {
+		searchResult, err := cursor.Next()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pages += 1
+
+		for _, hit := range searchResult.Hits.Hits {
+			if hit.Index != testIndexName {
+				t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+			}
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+			numDocs += 1
+		}
+	}
+
+	if pages <= 0 {
+		t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+	}
+
+	if numDocs != 2 {
+		t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs)
+	}
+}
+
+func TestScanAndScrollWithMissingIndex(t *testing.T) {
+	client := setupTestClient(t) // does not create testIndexName
+
+	cursor, err := client.Scan(testIndexName).Scroll("30s").Do()
+	if err == nil {
+		t.Fatalf("expected error != nil; got: %v", err)
+	}
+	if cursor != nil {
+		t.Fatalf("expected cursor == nil; got: %v", cursor)
+	}
+}
+
+func TestScanAndScrollWithEmptyIndex(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	if isTravis() {
+		t.Skip("test on Travis fails regularly with " +
+			"Error 503 (Service Unavailable): SearchPhaseExecutionException[Failed to execute phase [init_scan], all shards failed]")
+	}
+
+	_, err := client.Flush().Index(testIndexName).WaitIfOngoing(true).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cursor, err := client.Scan(testIndexName).Scroll("30s").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if cursor == nil {
+		t.Fatalf("expected cursor; got: %v", cursor)
+	}
+
+	// First request returns no error, but no hits
+	res, err := cursor.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected results != nil; got: nil")
+	}
+	if res.ScrollId == "" {
+		t.Errorf("expected scrollId in results; got: %q", res.ScrollId)
+	}
+	if res.TotalHits() != 0 {
+		t.Errorf("expected TotalHits() = %d; got %d", 0, res.TotalHits())
+	}
+	if res.Hits == nil {
+		t.Errorf("expected results.Hits != nil; got: nil")
+	}
+	if res.Hits.TotalHits != 0 {
+		t.Errorf("expected results.Hits.TotalHits = %d; got %d", 0, res.Hits.TotalHits)
+	}
+	if res.Hits.Hits == nil {
+		t.Errorf("expected results.Hits.Hits != nil; got: %v", res.Hits.Hits)
+	}
+	if len(res.Hits.Hits) != 0 {
+		t.Errorf("expected len(results.Hits.Hits) == %d; got: %d", 0, len(res.Hits.Hits))
+	}
+
+	// Subsequent requests return EOS
+	res, err = cursor.Next()
+	if err != EOS {
+		t.Fatal(err)
+	}
+	if res != nil {
+		t.Fatalf("expected results == %v; got: %v", nil, res)
+	}
+
+	res, err = cursor.Next()
+	if err != EOS {
+		t.Fatal(err)
+	}
+	if res != nil {
+		t.Fatalf("expected results == %v; got: %v", nil, res)
+	}
+}
+
+func TestScanIssue119(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	comment1 := comment{User: "nico", Comment: "You bet."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+	_, err := 
client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	cursor, err := client.Scan(testIndexName).Fields("_source", "_parent").Size(1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for {
+		searchResult, err := cursor.Next()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		for _, hit := range searchResult.Hits.Hits {
+			if hit.Type == "tweet" {
+				if _, ok := hit.Fields["_parent"].(string); ok {
+					t.Errorf("Type `tweet` cannot have any parent...")
+
+					toPrint, _ := json.MarshalIndent(hit, "", " ")
+					t.Fatal(string(toPrint))
+				}
+			}
+
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/script.go b/services/templeton/vendor/src/github.com/olivere/elastic/script.go
new file mode 100644
index 000000000..a5c9e45e2
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/script.go
@@ -0,0 +1,131 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// Script holds all the parameters necessary to compile or find in cache
+// and then execute a script.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html
+// for details of scripting.
+type Script struct {
+	script string
+	typ    string
+	lang   string
+	params map[string]interface{}
+}
+
+// NewScript creates and initializes a new Script.
+func NewScript(script string) *Script {
+	return &Script{
+		script: script,
+		typ:    "", // default type is "inline"
+		params: make(map[string]interface{}),
+	}
+}
+
+// NewScriptInline creates and initializes a new Script of type "inline".
+func NewScriptInline(script string) *Script {
+	return NewScript(script).Type("inline")
+}
+
+// NewScriptId creates and initializes a new Script of type "id".
+func NewScriptId(script string) *Script {
+	return NewScript(script).Type("id")
+}
+
+// NewScriptFile creates and initializes a new Script of type "file".
+func NewScriptFile(script string) *Script {
+	return NewScript(script).Type("file")
+}
+
+// Script is either the cache key of the script to be compiled/executed
+// or the actual script source code for inline scripts. For indexed
+// scripts this is the id used in the request. For file scripts this is
+// the file name.
+func (s *Script) Script(script string) *Script {
+	s.script = script
+	return s
+}
+
+// Type sets the type of script: "inline", "id", or "file".
+func (s *Script) Type(typ string) *Script {
+	s.typ = typ
+	return s
+}
+
+// Lang sets the language of the script. Permitted values are "groovy",
+// "expression", "mustache", "mvel" (default), "javascript", "python".
+// To use certain languages, you need to configure your server and/or
+// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html
+// for details.
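+//
+// A minimal usage sketch follows; the field name "retweets" and the
+// parameter name "factor" are hypothetical:
+//
+//	script := NewScriptInline("doc['retweets'].value * factor").
+//		Lang("groovy").
+//		Param("factor", 2)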
+func (s *Script) Lang(lang string) *Script { + s.lang = lang + return s +} + +// Param adds a key/value pair to the parameters that this script will be executed with. +func (s *Script) Param(name string, value interface{}) *Script { + if s.params == nil { + s.params = make(map[string]interface{}) + } + s.params[name] = value + return s +} + +// Params sets the map of parameters this script will be executed with. +func (s *Script) Params(params map[string]interface{}) *Script { + s.params = params + return s +} + +// Source returns the JSON serializable data for this Script. +func (s *Script) Source() (interface{}, error) { + if s.typ == "" && s.lang == "" && len(s.params) == 0 { + return s.script, nil + } + source := make(map[string]interface{}) + if s.typ == "" { + source["inline"] = s.script + } else { + source[s.typ] = s.script + } + if s.lang != "" { + source["lang"] = s.lang + } + if len(s.params) > 0 { + source["params"] = s.params + } + return source, nil +} + +// -- Script Field -- + +// ScriptField is a single script field. +type ScriptField struct { + FieldName string // name of the field + + script *Script +} + +// NewScriptField creates and initializes a new ScriptField. +func NewScriptField(fieldName string, script *Script) *ScriptField { + return &ScriptField{FieldName: fieldName, script: script} +} + +// Source returns the serializable JSON for the ScriptField. +func (f *ScriptField) Source() (interface{}, error) { + if f.script == nil { + return nil, errors.New("ScriptField expects script") + } + source := make(map[string]interface{}) + src, err := f.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/script_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/script_test.go new file mode 100644 index 000000000..552d92a02 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/script_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestScriptingDefault(t *testing.T) { + builder := NewScript("doc['field'].value * 2") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `"doc['field'].value * 2"` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingInline(t *testing.T) { + builder := NewScriptInline("doc['field'].value * factor").Param("factor", 2.0) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"inline":"doc['field'].value * factor","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingId(t *testing.T) { + builder := NewScriptId("script-with-id").Param("factor", 2.0) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"id":"script-with-id","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingFile(t *testing.T) { + builder := NewScriptFile("script-file").Param("factor", 2.0).Lang("groovy") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"file":"script-file","lang":"groovy","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/scroll.go b/services/templeton/vendor/src/github.com/olivere/elastic/scroll.go new file mode 100644 index 000000000..1cab35c36 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/scroll.go @@ -0,0 +1,208 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ScrollService manages a cursor through documents in Elasticsearch. +type ScrollService struct { + client *Client + indices []string + types []string + keepAlive string + query Query + size *int + pretty bool + scrollId string +} + +func NewScrollService(client *Client) *ScrollService { + builder := &ScrollService{ + client: client, + query: NewMatchAllQuery(), + } + return builder +} + +func (s *ScrollService) Index(indices ...string) *ScrollService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +func (s *ScrollService) Type(types ...string) *ScrollService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Scroll is an alias for KeepAlive, the time to keep +// the cursor alive (e.g. "5m" for 5 minutes). +func (s *ScrollService) Scroll(keepAlive string) *ScrollService { + s.keepAlive = keepAlive + return s +} + +// KeepAlive sets the maximum time the cursor will be +// available before expiration (e.g. 
"5m" for 5 minutes). +func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService { + s.keepAlive = keepAlive + return s +} + +func (s *ScrollService) Query(query Query) *ScrollService { + s.query = query + return s +} + +func (s *ScrollService) Pretty(pretty bool) *ScrollService { + s.pretty = pretty + return s +} + +func (s *ScrollService) Size(size int) *ScrollService { + s.size = &size + return s +} + +func (s *ScrollService) ScrollId(scrollId string) *ScrollService { + s.scrollId = scrollId + return s +} + +func (s *ScrollService) Do() (*SearchResult, error) { + if s.scrollId == "" { + return s.GetFirstPage() + } + return s.GetNextPage() +} + +func (s *ScrollService) GetFirstPage() (*SearchResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + // Types + typesPart := make([]string, 0) + for _, typ := range s.types { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": typ, + }) + if err != nil { + return nil, err + } + typesPart = append(typesPart, typ) + } + if len(typesPart) > 0 { + path += "/" + strings.Join(typesPart, ",") + } + + // Search + path += "/_search" + + // Parameters + params := make(url.Values) + // TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated. + params.Set("search_type", "scan") + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.keepAlive != "" { + params.Set("scroll", s.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + if s.size != nil && *s.size > 0 { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + + // Set body + body := make(map[string]interface{}) + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + body["query"] = src + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + searchResult := new(SearchResult) + if err := json.Unmarshal(res.Body, searchResult); err != nil { + return nil, err + } + + return searchResult, nil +} + +func (s *ScrollService) GetNextPage() (*SearchResult, error) { + if s.scrollId == "" { + return nil, EOS + } + + // Build url + path := "/_search/scroll" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.keepAlive != "" { + params.Set("scroll", s.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, s.scrollId) + if err != nil { + return nil, err + } + + // Return result + searchResult := new(SearchResult) + if err := json.Unmarshal(res.Body, searchResult); err != nil { + return nil, err + } + + // Determine last page + if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 { + return nil, EOS + } + + return searchResult, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/scroll_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/scroll_test.go new file mode 100644 index 000000000..4a5c48111 
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/scroll_test.go
@@ -0,0 +1,106 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+func TestScroll(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	res, err := client.Scroll(testIndexName).Size(1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if res.Hits == nil {
+		t.Errorf("expected results.Hits != nil; got nil")
+	}
+	if res.Hits.TotalHits != 3 {
+		t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, res.Hits.TotalHits)
+	}
+	if len(res.Hits.Hits) != 0 {
+		t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(res.Hits.Hits))
+	}
+	if res.ScrollId == "" {
+		t.Errorf("expected scrollId in results; got %q", res.ScrollId)
+	}
+
+	pages := 0
+	numDocs := 0
+	scrollId := res.ScrollId
+
+	for {
+		searchResult, err := client.Scroll(testIndexName).
+			Size(1).
+			ScrollId(scrollId).
+			Do()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pages += 1
+
+		for _, hit := range searchResult.Hits.Hits {
+			if hit.Index != testIndexName {
+				t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+			}
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+			numDocs += 1
+		}
+
+		scrollId = searchResult.ScrollId
+		if scrollId == "" {
+			t.Errorf("expected scrollId in results; got %q", scrollId)
+		}
+	}
+
+	if pages <= 0 {
+		t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+	}
+
+	if numDocs != 3 {
+		t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search.go b/services/templeton/vendor/src/github.com/olivere/elastic/search.go
new file mode 100644
index 000000000..4811ee1ed
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search.go
@@ -0,0 +1,429 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// Search for documents in Elasticsearch.
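+//
+// A minimal usage sketch; the index name "tweets" and the field "user"
+// are hypothetical:
+//
+//	res, err := client.Search().
+//		Index("tweets").
+//		Query(NewTermQuery("user", "olivere")).
+//		Size(10).
+//		Do()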
+type SearchService struct { + client *Client + searchSource *SearchSource + source interface{} + pretty bool + searchType string + indices []string + routing string + preference string + types []string +} + +// NewSearchService creates a new service for searching in Elasticsearch. +func NewSearchService(client *Client) *SearchService { + builder := &SearchService{ + client: client, + searchSource: NewSearchSource(), + } + return builder +} + +// SearchSource sets the search source builder to use with this service. +func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService { + s.searchSource = searchSource + if s.searchSource == nil { + s.searchSource = NewSearchSource() + } + return s +} + +// Source allows the user to set the request body manually without using +// any of the structs and interfaces in Elastic. +func (s *SearchService) Source(source interface{}) *SearchService { + s.source = source + return s +} + +// Index sets the names of the indices to use for search. +func (s *SearchService) Index(indices ...string) *SearchService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +// Type allows to restrict the search to a list of types. +func (s *SearchService) Type(types ...string) *SearchService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Pretty enables the caller to indent the JSON output. +func (s *SearchService) Pretty(pretty bool) *SearchService { + s.pretty = pretty + return s +} + +// Timeout sets the timeout to use, e.g. "1s" or "1000ms". +func (s *SearchService) Timeout(timeout string) *SearchService { + s.searchSource = s.searchSource.Timeout(timeout) + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. +func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService { + s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis) + return s +} + +// SearchType sets the search operation type. Valid values are: +// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", +// "dfs_query_and_fetch", "count", "scan". +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-type.html +// for details. +func (s *SearchService) SearchType(searchType string) *SearchService { + s.searchType = searchType + return s +} + +// Routing is a list of specific routing values to control the shards +// the search will be executed on. +func (s *SearchService) Routing(routings ...string) *SearchService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference sets the preference to execute the search. Defaults to +// randomize across shards. Can be set to "_local" to prefer local shards, +// "_primary" to execute on primary shards only, or a custom value which +// guarantees that the same order will be used across different requests. +func (s *SearchService) Preference(preference string) *SearchService { + s.preference = preference + return s +} + +// Query sets the query to perform, e.g. MatchAllQuery. +func (s *SearchService) Query(query Query) *SearchService { + s.searchSource = s.searchSource.Query(query) + return s +} + +// PostFilter will be executed after the query has been executed and +// only affects the search hits, not the aggregations. +// This filter is always executed as the last filtering mechanism. 
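+//
+// Sketch: keep aggregations computed over all matching documents while
+// narrowing the returned hits (the names below are hypothetical):
+//
+//	svc.Aggregation("by_user", NewTermsAggregation().Field("user")).
+//		PostFilter(NewTermQuery("user", "olivere"))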
+func (s *SearchService) PostFilter(postFilter Query) *SearchService {
+	s.searchSource = s.searchSource.PostFilter(postFilter)
+	return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchService) FetchSource(fetchSource bool) *SearchService {
+	s.searchSource = s.searchSource.FetchSource(fetchSource)
+	return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService {
+	s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
+	return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
+	s.searchSource = s.searchSource.Highlight(highlight)
+	return s
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
+	s.searchSource = s.searchSource.GlobalSuggestText(globalText)
+	return s
+}
+
+// Suggester adds a suggester to the search.
+func (s *SearchService) Suggester(suggester Suggester) *SearchService {
+	s.searchSource = s.searchSource.Suggester(suggester)
+	return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
+	s.searchSource = s.searchSource.Aggregation(name, aggregation)
+	return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchService) MinScore(minScore float64) *SearchService {
+	s.searchSource = s.searchSource.MinScore(minScore)
+	return s
+}
+
+// From index to start the search from. Defaults to 0.
+func (s *SearchService) From(from int) *SearchService {
+	s.searchSource = s.searchSource.From(from)
+	return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchService) Size(size int) *SearchService {
+	s.searchSource = s.searchSource.Size(size)
+	return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchService) Explain(explain bool) *SearchService {
+	s.searchSource = s.searchSource.Explain(explain)
+	return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated with it.
+func (s *SearchService) Version(version bool) *SearchService {
+	s.searchSource = s.searchSource.Version(version)
+	return s
+}
+
+// Sort adds a sort order.
+func (s *SearchService) Sort(field string, ascending bool) *SearchService {
+	s.searchSource = s.searchSource.Sort(field, ascending)
+	return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
+	s.searchSource = s.searchSource.SortWithInfo(info)
+	return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
+	s.searchSource = s.searchSource.SortBy(sorter...)
+	return s
+}
+
+// NoFields indicates that no fields should be loaded, resulting in only
+// id and type to be returned per field.
+func (s *SearchService) NoFields() *SearchService {
+	s.searchSource = s.searchSource.NoFields()
+	return s
+}
+
+// Field adds a single field to load and return (note, must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
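+//
+// Sketch (assuming "user" is mapped as a stored field):
+//
+//	svc.Field("user")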
+func (s *SearchService) Field(fieldName string) *SearchService { + s.searchSource = s.searchSource.Field(fieldName) + return s +} + +// Fields sets the fields to load and return as part of the search request. +// If none are specified, the source of the document will be returned. +func (s *SearchService) Fields(fields ...string) *SearchService { + s.searchSource = s.searchSource.Fields(fields...) + return s +} + +// Do executes the search and returns a SearchResult. +func (s *SearchService) Do() (*SearchResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + path += strings.Join(indexPart, ",") + + // Types part + if len(s.types) > 0 { + typesPart := make([]string, 0) + for _, typ := range s.types { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": typ, + }) + if err != nil { + return nil, err + } + typesPart = append(typesPart, typ) + } + path += "/" + path += strings.Join(typesPart, ",") + } + + // Search + path += "/_search" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + + // Perform request + var body interface{} + if s.source != nil { + body = s.source + } else { + src, err := s.searchSource.Source() + if err != nil { + return nil, err + } + body = src + } + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return search results + ret := new(SearchResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// SearchResult is the result of a search in Elasticsearch. +type SearchResult struct { + TookInMillis int64 `json:"took"` // search time in milliseconds + ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations + Hits *SearchHits `json:"hits"` // the actual search hits + Suggest SearchSuggest `json:"suggest"` // results from suggesters + Aggregations Aggregations `json:"aggregations"` // results from aggregations + TimedOut bool `json:"timed_out"` // true if the search timed out + //Error string `json:"error,omitempty"` // used in MultiSearch only + // TODO double-check that MultiGet now returns details error information + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet +} + +// TotalHits is a convenience function to return the number of hits for +// a search result. +func (r *SearchResult) TotalHits() int64 { + if r.Hits != nil { + return r.Hits.TotalHits + } + return 0 +} + +// Each is a utility function to iterate over all hits. It saves you from +// checking for nil values. Notice that Each will ignore errors in +// serializing JSON. +func (r *SearchResult) Each(typ reflect.Type) []interface{} { + if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 { + return nil + } + slice := make([]interface{}, 0) + for _, hit := range r.Hits.Hits { + v := reflect.New(typ).Elem() + if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil { + slice = append(slice, v.Interface()) + } + } + return slice +} + +// SearchHits specifies the list of search hits. 
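+//
+// It mirrors the "hits" object of a search response, e.g. (abridged):
+//
+//	"hits": { "total": 3, "max_score": 1.0, "hits": [ ... ] }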
+type SearchHits struct { + TotalHits int64 `json:"total"` // total number of hits found + MaxScore *float64 `json:"max_score"` // maximum score of all hits + Hits []*SearchHit `json:"hits"` // the actual hits returned +} + +// SearchHit is a single hit. +type SearchHit struct { + Score *float64 `json:"_score"` // computed score + Index string `json:"_index"` // index name + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // external or internal + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Timestamp int64 `json:"_timestamp"` // timestamp meta field + TTL int64 `json:"_ttl"` // ttl meta field + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Sort []interface{} `json:"sort"` // sort information + Highlight SearchHitHighlight `json:"highlight"` // highlighter information + Source *json.RawMessage `json:"_source"` // stored document source + Fields map[string]interface{} `json:"fields"` // returned fields + Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed + MatchedQueries []string `json:"matched_queries"` // matched queries + InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0 + + // Shard + // HighlightFields + // SortValues + // MatchedFilters +} + +type SearchHitInnerHits struct { + Hits *SearchHits `json:"hits"` +} + +// SearchExplanation explains how the score for a hit was computed. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html. +type SearchExplanation struct { + Value float64 `json:"value"` // e.g. 1.0 + Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:" + Details []SearchExplanation `json:"details,omitempty"` // recursive details +} + +// Suggest + +// SearchSuggest is a map of suggestions. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggest map[string][]SearchSuggestion + +// SearchSuggestion is a single search suggestion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []SearchSuggestionOption `json:"options"` +} + +// SearchSuggestionOption is an option of a SearchSuggestion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggestionOption struct { + Text string `json:"text"` + Score float64 `json:"score"` + Freq int `json:"freq"` + Payload interface{} `json:"payload"` +} + +// Aggregations (see search_aggs.go) + +// Highlighting + +// SearchHitHighlight is the highlight information of a search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html +// for a general discussion of highlighting. +type SearchHitHighlight map[string][]string diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs.go new file mode 100644 index 000000000..8e13a539a --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs.go @@ -0,0 +1,1270 @@ +// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" +) + +// Aggregations can be seen as a unit-of-work that build +// analytic information over a set of documents. It is +// (in many senses) the follow-up of facets in Elasticsearch. +// For more details about aggregations, visit: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html +type Aggregation interface { + // Source returns a JSON-serializable aggregation that is a fragment + // of the request sent to Elasticsearch. + Source() (interface{}, error) +} + +// Aggregations is a list of aggregations that are part of a search result. +type Aggregations map[string]*json.RawMessage + +// Min returns min aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html +func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Max returns max aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html +func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sum returns sum aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html +func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Avg returns average aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html +func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ValueCount returns value-count aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html +func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Cardinality returns cardinality aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html +func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Stats returns stats aggregation results. +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html +func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ExtendedStats returns extended stats aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html +func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationExtendedStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Percentiles returns percentiles results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html +func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// PercentileRanks returns percentile ranks results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html +func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// TopHits returns top-hits aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html +func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationTopHitsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Global returns global results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html +func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filter returns filter results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html +func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filters returns filters results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html +func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketFilters) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Missing returns missing results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html +func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Nested returns nested results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html +func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ReverseNested returns reverse-nested results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html +func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Children returns children results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html +func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Terms returns terms aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html +func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SignificantTerms returns significant terms aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html +func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketSignificantTerms) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sampler returns sampler aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-sampler-aggregation.html +func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Range returns range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html +func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// KeyedRange returns keyed range aggregation results. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html. +func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyedRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateRange returns date range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html +func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// IPv4Range returns IPv4 range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html +func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Histogram returns histogram aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html +func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateHistogram returns date histogram aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html +func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoBounds returns geo-bounds aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html +func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationGeoBoundsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoHash returns geo-hash aggregation results. +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html +func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoDistance returns geo distance aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html +func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// AvgBucket returns average bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html +func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SumBucket returns sum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html +func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MaxBucket returns maximum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html +func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MinBucket returns minimum bucket pipeline aggregation results. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html +func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MovAvg returns moving average pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html +func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Derivative returns derivative pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html +func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineDerivative) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// CumulativeSum returns a cumulative sum pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html +func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// BucketScript returns bucket script pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html +func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SerialDiff returns serial differencing pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html +func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// -- Single value metric -- + +// AggregationValueMetric is a single-value metric, returned e.g. by a +// Min or Max aggregation. +type AggregationValueMetric struct { + Aggregations + + Value *float64 //`json:"value"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure. 
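+// It is usually invoked indirectly via the getters above, e.g. (sketch;
+// the aggregation name "min_price" is hypothetical):
+//
+//	if agg, found := searchResult.Aggregations.Min("min_price"); found && agg.Value != nil {
+//		_ = *agg.Value
+//	}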
+func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Stats metric -- + +// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation. +type AggregationStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure. +func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Extended stats metric -- + +// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation. +type AggregationExtendedStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + SumOfSquares *float64 //`json:"sum_of_squares,omitempty"` + Variance *float64 //`json:"variance,omitempty"` + StdDeviation *float64 //`json:"std_deviation,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure. +func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["sum_of_squares"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfSquares) + } + if v, ok := aggs["variance"]; ok && v != nil { + json.Unmarshal(*v, &a.Variance) + } + if v, ok := aggs["std_deviation"]; ok && v != nil { + json.Unmarshal(*v, &a.StdDeviation) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Percentiles metric -- + +// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation. 
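+//
+// A minimal usage sketch (the aggregation name "load_time" is assumed;
+// upstream elastic defines a matching Percentiles accessor earlier in
+// this file):
+//
+//	if m, found := res.Aggregations.Percentiles("load_time"); found {
+//		if p95, ok := m.Values["95.0"]; ok {
+//			fmt.Printf("95th percentile: %v\n", p95)
+//		}
+//	}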
+type AggregationPercentilesMetric struct { + Aggregations + + Values map[string]float64 // `json:"values"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure. +func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["values"]; ok && v != nil { + json.Unmarshal(*v, &a.Values) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Top-hits metric -- + +// AggregationTopHitsMetric is a metric returned by a TopHits aggregation. +type AggregationTopHitsMetric struct { + Aggregations + + Hits *SearchHits //`json:"hits"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure. +func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + a.Aggregations = aggs + a.Hits = new(SearchHits) + if v, ok := aggs["hits"]; ok && v != nil { + json.Unmarshal(*v, &a.Hits) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + return nil +} + +// -- Geo-bounds metric -- + +// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation. +type AggregationGeoBoundsMetric struct { + Aggregations + + Bounds struct { + TopLeft struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"top_left"` + BottomRight struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"bottom_right"` + } `json:"bounds"` + + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure. +func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["bounds"]; ok && v != nil { + json.Unmarshal(*v, &a.Bounds) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Single bucket -- + +// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global. +type AggregationSingleBucket struct { + Aggregations + + DocCount int64 // `json:"doc_count"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure. +func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket range items -- + +// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned +// with a range aggregation. 
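+//
+// A sketch of iterating the returned buckets (the aggregation name
+// "price_ranges" is assumed; upstream elastic defines a matching Range
+// accessor earlier in this file):
+//
+//	if r, found := res.Aggregations.Range("price_ranges"); found {
+//		for _, b := range r.Buckets {
+//			fmt.Printf("%s: %d docs\n", b.Key, b.DocCount)
+//		}
+//	}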
+type AggregationBucketRangeItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketRangeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. +func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned +// with a keyed range aggregation. +type AggregationBucketKeyedRangeItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. +func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure. +type AggregationBucketRangeItem struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + From *float64 //`json:"from"` + FromAsString string //`json:"from_as_string"` + To *float64 //`json:"to"` + ToAsString string //`json:"to_as_string"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure. +func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["from"]; ok && v != nil { + json.Unmarshal(*v, &a.From) + } + if v, ok := aggs["from_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.FromAsString) + } + if v, ok := aggs["to"]; ok && v != nil { + json.Unmarshal(*v, &a.To) + } + if v, ok := aggs["to_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ToAsString) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket key items -- + +// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned +// with a terms aggregation. 
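+//
+// A sketch of iterating term buckets (the aggregation name "genres" is
+// assumed; upstream elastic defines a matching Terms accessor earlier in
+// this file):
+//
+//	if terms, found := res.Aggregations.Terms("genres"); found {
+//		for _, b := range terms.Buckets {
+//			fmt.Printf("%v: %d docs\n", b.Key, b.DocCount)
+//		}
+//	}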
+type AggregationBucketKeyItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure. +func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure. +type AggregationBucketKeyItem struct { + Aggregations + + Key interface{} //`json:"key"` + KeyNumber json.Number + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure. +func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + json.Unmarshal(*v, &a.KeyNumber) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket types for significant terms -- + +// AggregationBucketSignificantTerms is a bucket aggregation returned +// with a significant terms aggregation. +type AggregationBucketSignificantTerms struct { + Aggregations + + DocCount int64 //`json:"doc_count"` + Buckets []*AggregationBucketSignificantTerm //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure. +func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure. +type AggregationBucketSignificantTerm struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + BgCount int64 //`json:"bg_count"` + Score float64 //`json:"score"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure. 
+func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["bg_count"]; ok && v != nil { + json.Unmarshal(*v, &a.BgCount) + } + if v, ok := aggs["score"]; ok && v != nil { + json.Unmarshal(*v, &a.Score) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket filters -- + +// AggregationBucketFilters is a multi-bucket aggregation that is returned +// with a filters aggregation. +type AggregationBucketFilters struct { + Aggregations + + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure. +func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + json.Unmarshal(*v, &a.NamedBuckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket histogram items -- + +// AggregationBucketHistogramItems is a bucket aggregation that is returned +// with a date histogram aggregation. +type AggregationBucketHistogramItems struct { + Aggregations + + Buckets []*AggregationBucketHistogramItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure. +func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure. +type AggregationBucketHistogramItem struct { + Aggregations + + Key int64 //`json:"key"` + KeyAsString *string //`json:"key_as_string"` + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure. +func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["key_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.KeyAsString) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline simple value -- + +// AggregationPipelineSimpleValue is a simple value, returned e.g. by a +// MovAvg aggregation. 
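+//
+// A sketch of reading such a value via the MovAvg accessor defined above
+// (the aggregation name "the_movavg" is assumed):
+//
+//	if v, found := res.Aggregations.MovAvg("the_movavg"); found && v.Value != nil {
+//		fmt.Printf("moving average: %f\n", *v.Value)
+//	}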
+type AggregationPipelineSimpleValue struct {
+	Aggregations
+
+	Value         *float64               // `json:"value"`
+	ValueAsString string                 // `json:"value_as_string"`
+	Meta          map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure.
+func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["value"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Value)
+	}
+	if v, ok := aggs["value_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.ValueAsString)
+	}
+	if v, ok := aggs["meta"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Meta)
+	}
+	a.Aggregations = aggs
+	return nil
+}
+
+// -- Pipeline bucket metric value --
+
+// AggregationPipelineBucketMetricValue is a value returned e.g. by a
+// MaxBucket aggregation.
+type AggregationPipelineBucketMetricValue struct {
+	Aggregations
+
+	Keys          []interface{}          // `json:"keys"`
+	Value         *float64               // `json:"value"`
+	ValueAsString string                 // `json:"value_as_string"`
+	Meta          map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure.
+func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["keys"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Keys)
+	}
+	if v, ok := aggs["value"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Value)
+	}
+	if v, ok := aggs["value_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.ValueAsString)
+	}
+	if v, ok := aggs["meta"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Meta)
+	}
+	a.Aggregations = aggs
+	return nil
+}
+
+// -- Pipeline derivative --
+
+// AggregationPipelineDerivative is the value returned by a
+// Derivative aggregation.
+type AggregationPipelineDerivative struct {
+	Aggregations
+
+	Value                   *float64               // `json:"value"`
+	ValueAsString           string                 // `json:"value_as_string"`
+	NormalizedValue         *float64               // `json:"normalized_value"`
+	NormalizedValueAsString string                 // `json:"normalized_value_as_string"`
+	Meta                    map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure.
+func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["value"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Value)
+	}
+	if v, ok := aggs["value_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.ValueAsString)
+	}
+	if v, ok := aggs["normalized_value"]; ok && v != nil {
+		json.Unmarshal(*v, &a.NormalizedValue)
+	}
+	if v, ok := aggs["normalized_value_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.NormalizedValueAsString)
+	}
+	if v, ok := aggs["meta"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Meta)
+	}
+	a.Aggregations = aggs
+	return nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children.go
new file mode 100644
index 000000000..903e5461f
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ChildrenAggregation is a special single bucket aggregation that enables +// aggregating from buckets on parent document types to buckets on child documents. +// It is available from 1.4.0.Beta1 upwards. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html +type ChildrenAggregation struct { + typ string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewChildrenAggregation() *ChildrenAggregation { + return &ChildrenAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation { + a.typ = typ + return a +} + +func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation { + a.meta = metaData + return a +} + +func (a *ChildrenAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "to-answers" : { + // "children": { + // "type" : "answer" + // } + // } + // } + // } + // This method returns only the { "type" : ... } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["children"] = opts + opts["type"] = a.typ + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children_test.go new file mode 100644 index 000000000..a305073f3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestChildrenAggregation(t *testing.T) { + agg := NewChildrenAggregation().Type("answer") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"children":{"type":"answer"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestChildrenAggregationWithSubAggregation(t *testing.T) { + subAgg := NewTermsAggregation().Field("owner.display_name").Size(10) + agg := NewChildrenAggregation().Type("answer") + agg = agg.SubAggregation("top-names", subAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"top-names":{"terms":{"field":"owner.display_name","size":10}}},"children":{"type":"answer"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go new file mode 100644 index 000000000..231c51ef8 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go @@ -0,0 +1,285 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DateHistogramAggregation is a multi-bucket aggregation similar to the +// histogram except it can only be applied on date values. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html +type DateHistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval string + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin interface{} + extendedBoundsMax interface{} + timeZone string + format string + offset string +} + +// NewDateHistogramAggregation creates a new DateHistogramAggregation. +func NewDateHistogramAggregation() *DateHistogramAggregation { + return &DateHistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +// Field on which the aggregation is processed. +func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation { + a.field = field + return a +} + +func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation { + a.missing = missing + return a +} + +func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation { + a.meta = metaData + return a +} + +// Interval by which the aggregation gets processed. 
+// Allowed values are: "year", "quarter", "month", "week", "day",
+// "hour", "minute". It also supports time settings like "1.5h"
+// (up to "w" for weeks).
+func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation {
+	a.interval = interval
+	return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation {
+	a.order = order
+	a.orderAsc = asc
+	return a
+}
+
+func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation {
+	// "order" : { "_count" : "asc" }
+	a.order = "_count"
+	a.orderAsc = asc
+	return a
+}
+
+func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation {
+	return a.OrderByCount(true)
+}
+
+func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation {
+	return a.OrderByCount(false)
+}
+
+func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation {
+	// "order" : { "_key" : "asc" }
+	a.order = "_key"
+	a.orderAsc = asc
+	return a
+}
+
+func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation {
+	return a.OrderByKey(true)
+}
+
+func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation {
+	return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued metric sub-aggregation.
+func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "avg_height" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "avg_height" : { "avg" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a specific metric of a multi-valued
+// sub-aggregation (such as stats).
+func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "height_stats.avg" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "height_stats" : { "stats" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName + "." + metric
+	a.orderAsc = asc
+	return a
+}
+
+// MinDocCount sets the minimum document count per bucket.
+// Buckets with fewer documents than this min value will not be returned.
+func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation {
+	a.minDocCount = &minDocCount
+	return a
+}
+
+// TimeZone sets the timezone in which to translate dates before computing buckets.
+func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation {
+	a.timeZone = timeZone
+	return a
+}
+
+// Format sets the format to use for dates.
+func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation {
+	a.format = format
+	return a
+}
+
+// Offset sets the offset of time intervals in the histogram, e.g. "+6h".
+func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation {
+	a.offset = offset
+	return a
+}
+
+// ExtendedBounds accepts int, int64, string, or time.Time values.
+// In case the lower value in the histogram would be greater than min or the +// upper value would be less than max, empty buckets will be generated. +func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + a.extendedBoundsMax = max + return a +} + +// ExtendedBoundsMin accepts int, int64, string, or time.Time values. +func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + return a +} + +// ExtendedBoundsMax accepts int, int64, string, or time.Time values. +func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation { + a.extendedBoundsMax = max + return a +} + +func (a *DateHistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "articles_over_time" : { + // "date_histogram" : { + // "field" : "date", + // "interval" : "month" + // } + // } + // } + // } + // + // This method returns only the { "date_histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.timeZone != "" { + opts["time_zone"] = a.timeZone + } + if a.offset != "" { + opts["offset"] = a.offset + } + if a.format != "" { + opts["format"] = a.format + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go new file mode 100644 index 000000000..3c826ce9e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go @@ -0,0 +1,49 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDateHistogramAggregation(t *testing.T) { + agg := NewDateHistogramAggregation(). + Field("date"). + Interval("month"). + Format("YYYY-MM"). + TimeZone("UTC"). 
+	Offset("+6h")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month","offset":"+6h","time_zone":"UTC"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestDateHistogramAggregationWithMissing(t *testing.T) {
+	agg := NewDateHistogramAggregation().Field("date").Interval("year").Missing("1900")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_histogram":{"field":"date","interval":"year","missing":"1900"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range.go
new file mode 100644
index 000000000..82de0696b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range.go
@@ -0,0 +1,234 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"time"
+)
+
+// DateRangeAggregation is a range aggregation that is dedicated to
+// date values. The main difference between this aggregation and the
+// normal range aggregation is that the from and to values can be expressed
+// in Date Math expressions, and it is also possible to specify a
+// date format by which the from and to response fields will be returned.
+// Note that this aggregation includes the from value and excludes the to
+// value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+type DateRangeAggregation struct {
+	field           string
+	script          *Script
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	keyed           *bool
+	unmapped        *bool
+	format          string
+	entries         []DateRangeAggregationEntry
+}
+
+type DateRangeAggregationEntry struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+func NewDateRangeAggregation() *DateRangeAggregation {
+	return &DateRangeAggregation{
+		subAggregations: make(map[string]Aggregation),
+		entries:         make([]DateRangeAggregationEntry, 0),
+	}
+}
+
+func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation {
+	a.field = field
+	return a
+}
+
+func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation {
+	a.script = script
+	return a
+}
+
+func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation {
+	a.keyed = &keyed
+	return a
+}
+
+func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation {
+	a.unmapped = &unmapped
+	return a
+}
+
+func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation {
+	a.format = format
+	return a
+}
+
+func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) LtWithKey(key string, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "range" : {
+	//             "date_range": {
+	//                 "field": "date",
+	//                 "format": "MM-yyy",
+	//                 "ranges": [
+	//                     { "to": "now-10M/M" },
+	//                     { "from": "now-10M/M" }
+	//                 ]
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "date_range" : { ... } } part.
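+	//
+	// In the code below, time.Time bounds are serialized in RFC 3339
+	// format, while numeric and string bounds (including date-math
+	// expressions such as "now-10M/M") pass through unchanged.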
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + if a.format != "" { + opts["format"] = a.format + } + + ranges := make([]interface{}, 0) + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go new file mode 100644 index 000000000..42c525121 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go @@ -0,0 +1,130 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDateRangeAggregation(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at") + agg = agg.AddRange(nil, "2012-12-31") + agg = agg.AddRange("2013-01-01", "2013-12-31") + agg = agg.AddRange("2014-01-01", nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithUnbounded(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + AddUnboundedFrom("2012-12-31"). + AddRange("2013-01-01", "2013-12-31"). 
+ AddUnboundedTo("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithLtAndCo(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + Lt("2012-12-31"). + Between("2013-01-01", "2013-12-31"). + Gt("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithKeyedFlag(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + Keyed(true). + Lt("2012-12-31"). + Between("2013-01-01", "2013-12-31"). + Gt("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithKeys(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + Keyed(true). + LtWithKey("pre-2012", "2012-12-31"). + BetweenWithKey("2013", "2013-01-01", "2013-12-31"). + GtWithKey("post-2013", "2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"key":"pre-2012","to":"2012-12-31"},{"from":"2013-01-01","key":"2013","to":"2013-12-31"},{"from":"2014-01-01","key":"post-2013"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithSpecialNames(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + AddRange("now-10M/M", "now+10M/M") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"from":"now-10M/M","to":"now+10M/M"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter.go new file mode 100644 index 000000000..101399882 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter.go @@ -0,0 +1,77 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// FilterAggregation defines a single bucket of all the documents +// in the current document set context that match a specified filter. +// Often this will be used to narrow down the current aggregation context +// to a specific set of documents. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html +type FilterAggregation struct { + filter Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewFilterAggregation() *FilterAggregation { + return &FilterAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation { + a.meta = metaData + return a +} + +func (a *FilterAggregation) Filter(filter Query) *FilterAggregation { + a.filter = filter + return a +} + +func (a *FilterAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "in_stock_products" : { + // "filter" : { "range" : { "stock" : { "gt" : 0 } } } + // } + // } + // } + // This method returns only the { "filter" : {} } part. + + src, err := a.filter.Source() + if err != nil { + return nil, err + } + source := make(map[string]interface{}) + source["filter"] = src + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter_test.go new file mode 100644 index 000000000..5c6262a26 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter_test.go @@ -0,0 +1,66 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFilterAggregation(t *testing.T) { + filter := NewRangeQuery("stock").Gt(0) + agg := NewFilterAggregation().Filter(filter) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFilterAggregationWithSubAggregation(t *testing.T) { + avgPriceAgg := NewAvgAggregation().Field("price") + filter := NewRangeQuery("stock").Gt(0) + agg := NewFilterAggregation().Filter(filter). 
+ SubAggregation("avg_price", avgPriceAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFilterAggregationWithMeta(t *testing.T) { + filter := NewRangeQuery("stock").Gt(0) + agg := NewFilterAggregation().Filter(filter).Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters.go new file mode 100644 index 000000000..6dda39c61 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters.go @@ -0,0 +1,96 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FiltersAggregation defines a multi bucket aggregations where each bucket +// is associated with a filter. Each bucket will collect all documents that +// match its associated filter. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html +type FiltersAggregation struct { + filters []Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewFiltersAggregation() *FiltersAggregation { + return &FiltersAggregation{ + filters: make([]Query, 0), + subAggregations: make(map[string]Aggregation), + } +} + +func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation { + a.filters = append(a.filters, filter) + return a +} + +func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation { + if len(filters) > 0 { + a.filters = append(a.filters, filters...) + } + return a +} + +func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation { + a.meta = metaData + return a +} + +func (a *FiltersAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "messages" : { + // "filters" : { + // "filters" : { + // "errors" : { "term" : { "body" : "error" }}, + // "warnings" : { "term" : { "body" : "warning" }} + // } + // } + // } + // } + // } + // This method returns only the (outer) { "filters" : {} } part. 
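+	//
+	// Note that only the anonymous (array) form of "filters" is emitted
+	// below, so the response buckets are unnamed and appear in the order
+	// in which the filters were added.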
+ + source := make(map[string]interface{}) + filters := make(map[string]interface{}) + source["filters"] = filters + + arr := make([]interface{}, len(a.filters)) + for i, filter := range a.filters { + src, err := filter.Source() + if err != nil { + return nil, err + } + arr[i] = src + } + filters["filters"] = arr + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters_test.go new file mode 100644 index 000000000..4977d5162 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFiltersAggregation(t *testing.T) { + f1 := NewRangeQuery("stock").Gt(0) + f2 := NewTermQuery("symbol", "GOOG") + agg := NewFiltersAggregation().Filters(f1, f2) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFiltersAggregationWithSubAggregation(t *testing.T) { + avgPriceAgg := NewAvgAggregation().Field("price") + f1 := NewRangeQuery("stock").Gt(0) + f2 := NewTermQuery("symbol", "GOOG") + agg := NewFiltersAggregation().Filters(f1, f2).SubAggregation("avg_price", avgPriceAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFiltersAggregationWithMetaData(t *testing.T) { + f1 := NewRangeQuery("stock").Gt(0) + f2 := NewTermQuery("symbol", "GOOG") + agg := NewFiltersAggregation().Filters(f1, f2).Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go 
b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go
new file mode 100644
index 000000000..3a1372221
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go
@@ -0,0 +1,194 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
+// and conceptually works very similarly to the range aggregation.
+// The user can define a point of origin and a set of distance range buckets.
+// The aggregation evaluates the distance of each document value from
+// the origin point and determines the bucket it belongs to based on
+// the ranges (a document belongs to a bucket if the distance between the
+// document and the origin falls within the distance range of the bucket).
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html
+type GeoDistanceAggregation struct {
+	field           string
+	unit            string
+	distanceType    string
+	point           string
+	ranges          []geoDistAggRange
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+}
+
+type geoDistAggRange struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+func NewGeoDistanceAggregation() *GeoDistanceAggregation {
+	return &GeoDistanceAggregation{
+		subAggregations: make(map[string]Aggregation),
+		ranges:          make([]geoDistAggRange, 0),
+	}
+}
+
+func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation {
+	a.field = field
+	return a
+}
+
+func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation {
+	a.unit = unit
+	return a
+}
+
+func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation {
+	a.distanceType = distanceType
+	return a
+}
+
+func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation {
+	a.point = latLon
+	return a
+}
+
+func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
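+// The supplied map is sent with the request and echoed back verbatim in
+// the aggregation response, which makes it useful for tagging requests
+// client-side.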
+func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//    "aggs" : {
+	//        "rings_around_amsterdam" : {
+	//            "geo_distance" : {
+	//                "field" : "location",
+	//                "origin" : "52.3760, 4.894",
+	//                "ranges" : [
+	//                    { "to" : 100 },
+	//                    { "from" : 100, "to" : 300 },
+	//                    { "from" : 300 }
+	//                ]
+	//            }
+	//        }
+	//    }
+	// }
+	//
+	// This method returns only the { "geo_distance" : { ... } } part.
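+	//
+	// A minimal usage sketch (the field name and origin are illustrative,
+	// mirroring this aggregation's unit tests):
+	//
+	//   agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
+	//   agg = agg.AddRange(nil, 100).AddRange(100, 300).AddRange(300, nil)
+	//   src, err := agg.Source() // src marshals to the JSON shown above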
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_distance"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.unit != "" { + opts["unit"] = a.unit + } + if a.distanceType != "" { + opts["distance_type"] = a.distanceType + } + if a.point != "" { + opts["origin"] = a.point + } + + ranges := make([]interface{}, 0) + for _, ent := range a.ranges { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case *int, *int16, *int32, *int64, *float32, *float64: + r["from"] = from + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case *int, *int16, *int32, *int64, *float32, *float64: + r["to"] = to + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go new file mode 100644 index 000000000..4cb0cd9f8 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go @@ -0,0 +1,71 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoDistanceAggregation(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddRange(nil, 100) + agg = agg.AddRange(100, 300) + agg = agg.AddRange(300, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceAggregationWithUnbounded(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddUnboundedFrom(100) + agg = agg.AddRange(100, 300) + agg = agg.AddUnboundedTo(300) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceAggregationWithMetaData(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddRange(nil, 100) + agg = agg.AddRange(100, 300) + agg = agg.AddRange(300, nil) + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global.go new file mode 100644 index 000000000..49e24d60f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global.go @@ -0,0 +1,71 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GlobalAggregation defines a single bucket of all the documents within +// the search execution context. This context is defined by the indices +// and the document types you’re searching on, but is not influenced +// by the search query itself. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html +type GlobalAggregation struct { + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGlobalAggregation() *GlobalAggregation { + return &GlobalAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation { + a.meta = metaData + return a +} + +func (a *GlobalAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "all_products" : { + // "global" : {}, + // "aggs" : { + // "avg_price" : { "avg" : { "field" : "price" } } + // } + // } + // } + // } + // This method returns only the { "global" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["global"] = opts + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global_test.go new file mode 100644 index 000000000..8b55010c7 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGlobalAggregation(t *testing.T) { + agg := NewGlobalAggregation() + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"global":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGlobalAggregationWithMetaData(t *testing.T) { + agg := NewGlobalAggregation().Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"global":{},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram.go new file mode 100644 index 000000000..7821adbc0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram.go @@ -0,0 +1,253 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HistogramAggregation is a multi-bucket values source based aggregation +// that can be applied on numeric values extracted from the documents. +// It dynamically builds fixed size (a.k.a. interval) buckets over the +// values. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html +type HistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval int64 + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin *int64 + extendedBoundsMax *int64 + offset *int64 +} + +func NewHistogramAggregation() *HistogramAggregation { + return &HistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *HistogramAggregation) Field(field string) *HistogramAggregation { + a.field = field + return a +} + +func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation { + a.missing = missing + return a +} + +func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation { + a.meta = metaData + return a +} + +func (a *HistogramAggregation) Interval(interval int64) *HistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. +func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation { + return a.OrderByCount(true) +} + +func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation { + return a.OrderByCount(false) +} + +func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation { + // "order" : { "_key" : "asc" } + a.order = "_key" + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation { + return a.OrderByKey(true) +} + +func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation { + return a.OrderByKey(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. 
+func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *HistogramAggregation) ExtendedBounds(min, max int64) *HistogramAggregation { + a.extendedBoundsMin = &min + a.extendedBoundsMax = &max + return a +} + +func (a *HistogramAggregation) ExtendedBoundsMin(min int64) *HistogramAggregation { + a.extendedBoundsMin = &min + return a +} + +func (a *HistogramAggregation) ExtendedBoundsMax(max int64) *HistogramAggregation { + a.extendedBoundsMax = &max + return a +} + +func (a *HistogramAggregation) Offset(offset int64) *HistogramAggregation { + a.offset = &offset + return a +} + +func (a *HistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "prices" : { + // "histogram" : { + // "field" : "price", + // "interval" : 50 + // } + // } + // } + // } + // + // This method returns only the { "histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.offset != nil { + opts["offset"] = *a.offset + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go new file mode 100644 index 000000000..6a5d5fb92 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestHistogramAggregation(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Interval(50) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHistogramAggregationWithMetaData(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Offset(10).Interval(50).Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50,"offset":10},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHistogramAggregationWithMissing(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Interval(50).Missing("n/a") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50,"missing":"n/a"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing.go new file mode 100644 index 000000000..ca610c953 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing.go @@ -0,0 +1,81 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MissingAggregation is a field data based single bucket aggregation, +// that creates a bucket of all documents in the current document set context +// that are missing a field value (effectively, missing a field or having +// the configured NULL value set). This aggregator will often be used in +// conjunction with other field data bucket aggregators (such as ranges) +// to return information for all the documents that could not be placed +// in any of the other buckets due to missing field data values. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html +type MissingAggregation struct { + field string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMissingAggregation() *MissingAggregation { + return &MissingAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MissingAggregation) Field(field string) *MissingAggregation { + a.field = field + return a +} + +func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation { + a.meta = metaData + return a +} + +func (a *MissingAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "products_without_a_price" : { + // "missing" : { "field" : "price" } + // } + // } + // } + // This method returns only the { "missing" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["missing"] = opts + + if a.field != "" { + opts["field"] = a.field + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing_test.go new file mode 100644 index 000000000..b52a96511 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMissingAggregation(t *testing.T) { + agg := NewMissingAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"missing":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMissingAggregationWithMetaData(t *testing.T) { + agg := NewMissingAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"missing":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested.go new file mode 100644 index 000000000..f65da8048 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested.go @@ -0,0 +1,82 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedAggregation is a special single bucket aggregation that enables +// aggregating nested documents. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-nested-aggregation.html +type NestedAggregation struct { + path string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewNestedAggregation() *NestedAggregation { + return &NestedAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation { + a.meta = metaData + return a +} + +func (a *NestedAggregation) Path(path string) *NestedAggregation { + a.path = path + return a +} + +func (a *NestedAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "name" : "led tv" } + // } + // "aggs" : { + // "resellers" : { + // "nested" : { + // "path" : "resellers" + // }, + // "aggs" : { + // "min_price" : { "min" : { "field" : "resellers.price" } } + // } + // } + // } + // } + // This method returns only the { "nested" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["nested"] = opts + + opts["path"] = a.path + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested_test.go new file mode 100644 index 000000000..c55612f07 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested_test.go @@ -0,0 +1,62 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestNestedAggregation(t *testing.T) { + agg := NewNestedAggregation().Path("resellers") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"nested":{"path":"resellers"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestNestedAggregationWithSubAggregation(t *testing.T) { + minPriceAgg := NewMinAggregation().Field("resellers.price") + agg := NewNestedAggregation().Path("resellers").SubAggregation("min_price", minPriceAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"min_price":{"min":{"field":"resellers.price"}}},"nested":{"path":"resellers"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestNestedAggregationWithMetaData(t *testing.T) { + agg := NewNestedAggregation().Path("resellers").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"nested":{"path":"resellers"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range.go new file mode 100644 index 000000000..bc017c60f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range.go @@ -0,0 +1,232 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "time" +) + +// RangeAggregation is a multi-bucket value source based aggregation that +// enables the user to define a set of ranges - each representing a bucket. +// During the aggregation process, the values extracted from each document +// will be checked against each bucket range and "bucket" the +// relevant/matching document. Note that this aggregration includes the +// from value and excludes the to value for each range. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html +type RangeAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + keyed *bool + unmapped *bool + entries []rangeAggregationEntry +} + +type rangeAggregationEntry struct { + Key string + From interface{} + To interface{} +} + +func NewRangeAggregation() *RangeAggregation { + return &RangeAggregation{ + subAggregations: make(map[string]Aggregation), + entries: make([]rangeAggregationEntry, 0), + } +} + +func (a *RangeAggregation) Field(field string) *RangeAggregation { + a.field = field + return a +} + +func (a *RangeAggregation) Script(script *Script) *RangeAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. 
+func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation { + a.missing = missing + return a +} + +func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation { + a.meta = metaData + return a +} + +func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation { + a.keyed = &keyed + return a +} + +func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation { + a.unmapped = &unmapped + return a +} + +func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) BetweenWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "price_ranges" : { + // "range" : { + // "field" : "price", + // "ranges" : [ + // { "to" : 50 }, + // { "from" : 50, "to" : 100 }, + // { "from" : 100 } + // ] + // } + // } + // } + // } + // + // This method returns only the { "range" : { ... } } part. 
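+	//
+	// A minimal usage sketch (the field name and bounds are illustrative,
+	// mirroring this aggregation's unit tests):
+	//
+	//   agg := NewRangeAggregation().Field("price")
+	//   agg = agg.AddRange(nil, 50).AddRange(50, 100).AddRange(100, nil)
+	//   src, err := agg.Source() // src marshals to the JSON shown above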
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + + ranges := make([]interface{}, 0) + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range_test.go new file mode 100644 index 000000000..f0fd5f5fd --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range_test.go @@ -0,0 +1,156 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestRangeAggregation(t *testing.T) { + agg := NewRangeAggregation().Field("price") + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithUnbounded(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + AddUnboundedFrom(50). + AddRange(20, 70). + AddRange(70, 120). + AddUnboundedTo(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithLtAndCo(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Lt(50). + Between(20, 70). + Between(70, 120). 
+ Gt(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithKeyedFlag(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Keyed(true). + Lt(50). + Between(20, 70). + Between(70, 120). + Gt(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithKeys(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Keyed(true). + LtWithKey("cheap", 50). + BetweenWithKey("affordable", 20, 70). + BetweenWithKey("average", 70, 120). + GtWithKey("expensive", 150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"key":"cheap","to":50},{"from":20,"key":"affordable","to":70},{"from":70,"key":"average","to":120},{"from":150,"key":"expensive"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithMetaData(t *testing.T) { + agg := NewRangeAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithMissing(t *testing.T) { + agg := NewRangeAggregation().Field("price").Missing(0) + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"price","missing":0,"ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler.go new file mode 100644 index 000000000..9a6df15ec --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler.go @@ -0,0 +1,145 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// SamplerAggregation is a filtering aggregation used to limit any +// sub aggregations' processing to a sample of the top-scoring documents. +// Optionally, diversity settings can be used to limit the number of matches +// that share a common value such as an "author". +// See: https://www.elastic.co/guide/en/elasticsearch/reference/2.x/search-aggregations-bucket-sampler-aggregation.html +type SamplerAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + shardSize int + maxDocsPerValue int + executionHint string +} + +func NewSamplerAggregation() *SamplerAggregation { + return &SamplerAggregation{ + shardSize: -1, + maxDocsPerValue: -1, + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SamplerAggregation) Field(field string) *SamplerAggregation { + a.field = field + return a +} + +func (a *SamplerAggregation) Script(script *Script) *SamplerAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *SamplerAggregation) Missing(missing interface{}) *SamplerAggregation { + a.missing = missing + return a +} + +func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation { + a.meta = metaData + return a +} + +// ShardSize sets the maximum number of docs returned from each shard. +func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation { + a.shardSize = shardSize + return a +} + +func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation { + a.maxDocsPerValue = maxDocsPerValue + return a +} + +func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation { + a.executionHint = hint + return a +} + +func (a *SamplerAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "sample" : { + // "sampler" : { + // "field" : "user.id", + // "shard_size" : 200 + // }, + // "aggs": { + // "keywords": { + // "significant_terms": { + // "field": "text" + // } + // } + // } + // } + // } + // } + // + // This method returns only the { "sampler" : { ... } } part. 
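+	//
+	// A minimal usage sketch (the field names and shard size are illustrative,
+	// mirroring this aggregation's unit tests):
+	//
+	//   keywords := NewSignificantTermsAggregation().Field("text")
+	//   agg := NewSamplerAggregation().Field("user.id").ShardSize(200).
+	//       SubAggregation("keywords", keywords)
+	//   src, err := agg.Source() // src marshals to the JSON shown above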
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sampler"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + if a.shardSize >= 0 { + opts["shard_size"] = a.shardSize + } + if a.maxDocsPerValue >= 0 { + opts["max_docs_per_value"] = a.maxDocsPerValue + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go new file mode 100644 index 000000000..da4ca5534 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go @@ -0,0 +1,52 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSamplerAggregation(t *testing.T) { + keywordsAgg := NewSignificantTermsAggregation().Field("text") + agg := NewSamplerAggregation(). + Field("user.id"). + ShardSize(200). + SubAggregation("keywords", keywordsAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","shard_size":200}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSamplerAggregationWithMissing(t *testing.T) { + keywordsAgg := NewSignificantTermsAggregation().Field("text") + agg := NewSamplerAggregation(). + Field("user.id"). + Missing("n/a"). + SubAggregation("keywords", keywordsAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","missing":"n/a"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go new file mode 100644 index 000000000..1008887f0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go @@ -0,0 +1,141 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+// SignificantTermsAggregation is an aggregation that returns interesting
+// or unusual occurrences of terms in a set.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+type SignificantTermsAggregation struct {
+	field           string
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+
+	minDocCount      *int
+	shardMinDocCount *int
+	requiredSize     *int
+	shardSize        *int
+	filter           Query
+	executionHint    string
+}
+
+func NewSignificantTermsAggregation() *SignificantTermsAggregation {
+	return &SignificantTermsAggregation{
+		subAggregations: make(map[string]Aggregation, 0),
+	}
+}
+
+func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation {
+	a.field = field
+	return a
+}
+
+func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation {
+	a.minDocCount = &minDocCount
+	return a
+}
+
+func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation {
+	a.shardMinDocCount = &shardMinDocCount
+	return a
+}
+
+func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation {
+	a.requiredSize = &requiredSize
+	return a
+}
+
+func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation {
+	a.shardSize = &shardSize
+	return a
+}
+
+func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation {
+	a.filter = filter
+	return a
+}
+
+func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation {
+	a.executionHint = hint
+	return a
+}
+
+func (a *SignificantTermsAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "query" : {
+	//         "terms" : {"force" : [ "British Transport Police" ]}
+	//     },
+	//     "aggregations" : {
+	//         "significantCrimeTypes" : {
+	//             "significant_terms" : { "field" : "crime_type" }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the
+	// { "significant_terms" : { "field" : "crime_type" } }
+	// part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["significant_terms"] = opts
+
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.requiredSize != nil {
+		opts["size"] = *a.requiredSize // not a typo!
+ } + if a.shardSize != nil { + opts["shard_size"] = *a.shardSize + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.filter != nil { + src, err := a.filter.Source() + if err != nil { + return nil, err + } + opts["background_filter"] = src + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go new file mode 100644 index 000000000..d24f3c9d1 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go @@ -0,0 +1,86 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSignificantTermsAggregation(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"field":"crime_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithArgs(t *testing.T) { + agg := NewSignificantTermsAggregation(). + Field("crime_type"). + ExecutionHint("map"). + ShardSize(5). + MinDocCount(10). 
+ BackgroundFilter(NewTermQuery("city", "London")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"background_filter":{"term":{"city":"London"}},"execution_hint":"map","field":"crime_type","min_doc_count":10,"shard_size":5}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationSubAggregation(t *testing.T) { + crimeTypesAgg := NewSignificantTermsAggregation().Field("crime_type") + agg := NewTermsAggregation().Field("force") + agg = agg.SubAggregation("significantCrimeTypes", crimeTypesAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"significantCrimeTypes":{"significant_terms":{"field":"crime_type"}}},"terms":{"field":"force"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithMetaData(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"significant_terms":{"field":"crime_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms.go new file mode 100644 index 000000000..2d3c0d1ad --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms.go @@ -0,0 +1,341 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsAggregation is a multi-bucket value source based aggregation +// where buckets are dynamically built - one per unique value. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html +type TermsAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + size *int + shardSize *int + requiredSize *int + minDocCount *int + shardMinDocCount *int + valueType string + order string + orderAsc bool + includePattern string + includeFlags *int + excludePattern string + excludeFlags *int + executionHint string + collectionMode string + showTermDocCountError *bool + includeTerms []string + excludeTerms []string +} + +func NewTermsAggregation() *TermsAggregation { + return &TermsAggregation{ + subAggregations: make(map[string]Aggregation, 0), + includeTerms: make([]string, 0), + excludeTerms: make([]string, 0), + } +} + +func (a *TermsAggregation) Field(field string) *TermsAggregation { + a.field = field + return a +} + +func (a *TermsAggregation) Script(script *Script) *TermsAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. 
+func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation { + a.missing = missing + return a +} + +func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation { + a.meta = metaData + return a +} + +func (a *TermsAggregation) Size(size int) *TermsAggregation { + a.size = &size + return a +} + +func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation { + a.requiredSize = &requiredSize + return a +} + +func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation { + a.shardSize = &shardSize + return a +} + +func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation { + a.shardMinDocCount = &shardMinDocCount + return a +} + +func (a *TermsAggregation) Include(regexp string) *TermsAggregation { + a.includePattern = regexp + return a +} + +func (a *TermsAggregation) IncludeWithFlags(regexp string, flags int) *TermsAggregation { + a.includePattern = regexp + a.includeFlags = &flags + return a +} + +func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation { + a.excludePattern = regexp + return a +} + +func (a *TermsAggregation) ExcludeWithFlags(regexp string, flags int) *TermsAggregation { + a.excludePattern = regexp + a.excludeFlags = &flags + return a +} + +// ValueType can be string, long, or double. +func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation { + a.valueType = valueType + return a +} + +func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation { + return a.OrderByCount(true) +} + +func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation { + return a.OrderByCount(false) +} + +func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation { + // "order" : { "_term" : "asc" } + a.order = "_term" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation { + return a.OrderByTerm(true) +} + +func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation { + return a.OrderByTerm(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. 
+func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation { + a.executionHint = hint + return a +} + +// Collection mode can be depth_first or breadth_first as of 1.4.0. +func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation { + a.collectionMode = collectionMode + return a +} + +func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation { + a.showTermDocCountError = &showTermDocCountError + return a +} + +func (a *TermsAggregation) IncludeTerms(terms ...string) *TermsAggregation { + a.includeTerms = append(a.includeTerms, terms...) + return a +} + +func (a *TermsAggregation) ExcludeTerms(terms ...string) *TermsAggregation { + a.excludeTerms = append(a.excludeTerms, terms...) + return a +} + +func (a *TermsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "genders" : { + // "terms" : { "field" : "gender" } + // } + // } + // } + // This method returns only the { "terms" : { "field" : "gender" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["terms"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + // TermsBuilder + if a.size != nil && *a.size >= 0 { + opts["size"] = *a.size + } + if a.shardSize != nil && *a.shardSize >= 0 { + opts["shard_size"] = *a.shardSize + } + if a.requiredSize != nil && *a.requiredSize >= 0 { + opts["required_size"] = *a.requiredSize + } + if a.minDocCount != nil && *a.minDocCount >= 0 { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.showTermDocCountError != nil { + opts["show_term_doc_count_error"] = *a.showTermDocCountError + } + if a.collectionMode != "" { + opts["collect_mode"] = a.collectionMode + } + if a.valueType != "" { + opts["value_type"] = a.valueType + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if len(a.includeTerms) > 0 { + opts["include"] = a.includeTerms + } + if a.includePattern != "" { + if a.includeFlags == nil || *a.includeFlags == 0 { + opts["include"] = a.includePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.includePattern + p["flags"] = *a.includeFlags + opts["include"] = p + } + } + if len(a.excludeTerms) > 0 { + opts["exclude"] = a.excludeTerms + } + if a.excludePattern != "" { + if a.excludeFlags == nil || *a.excludeFlags == 0 { + opts["exclude"] = a.excludePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.excludePattern + p["flags"] = *a.excludeFlags + opts["exclude"] = p + } + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := 
make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms_test.go new file mode 100644 index 000000000..e5f979333 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms_test.go @@ -0,0 +1,104 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermsAggregation(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc() + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithSubAggregation(t *testing.T) { + subAgg := NewAvgAggregation().Field("height") + agg := NewTermsAggregation().Field("gender").Size(10). + OrderByAggregation("avg_height", false) + agg = agg.SubAggregation("avg_height", subAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMultipleSubAggregation(t *testing.T) { + subAgg1 := NewAvgAggregation().Field("height") + subAgg2 := NewAvgAggregation().Field("width") + agg := NewTermsAggregation().Field("gender").Size(10). 
+ OrderByAggregation("avg_height", false) + agg = agg.SubAggregation("avg_height", subAgg1) + agg = agg.SubAggregation("avg_width", subAgg2) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}},"avg_width":{"avg":{"field":"width"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMetaData(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc() + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMissing(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).Missing("n/a") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms":{"field":"gender","missing":"n/a","size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg.go new file mode 100644 index 000000000..37ec2b7ad --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg.go @@ -0,0 +1,101 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgAggregation is a single-value metrics aggregation that computes +// the average of numeric values that are extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html +type AvgAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewAvgAggregation() *AvgAggregation { + return &AvgAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *AvgAggregation) Field(field string) *AvgAggregation { + a.field = field + return a +} + +func (a *AvgAggregation) Script(script *Script) *AvgAggregation { + a.script = script + return a +} + +func (a *AvgAggregation) Format(format string) *AvgAggregation { + a.format = format + return a +} + +func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
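`Meta` does not influence the computation; Elasticsearch simply echoes the map back alongside the aggregation's response, which makes it handy for tagging requests. A short sketch with an arbitrary illustrative key:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic" // assumed vendored import path
)

func main() {
	// "owner" is an arbitrary illustrative key; Elasticsearch echoes it back verbatim.
	agg := elastic.NewAvgAggregation().
		Field("grade").
		Meta(map[string]interface{}{"owner": "reporting"})

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"avg":{"field":"grade"},"meta":{"owner":"reporting"}}
}
```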
+func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation { + a.meta = metaData + return a +} + +func (a *AvgAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "avg_grade" : { "avg" : { "field" : "grade" } } + // } + // } + // This method returns only the { "avg" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["avg"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg_test.go new file mode 100644 index 000000000..c8539d12d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestAvgAggregation(t *testing.T) { + agg := NewAvgAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestAvgAggregationWithFormat(t *testing.T) { + agg := NewAvgAggregation().Field("grade").Format("000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade","format":"000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestAvgAggregationWithMetaData(t *testing.T) { + agg := NewAvgAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality.go new file mode 100644 index 000000000..ebf247c79 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality.go @@ -0,0 +1,120 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CardinalityAggregation is a single-value metrics aggregation that +// calculates an approximate count of distinct values. +// Values can be extracted either from specific fields in the document +// or generated by a script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html +type CardinalityAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + precisionThreshold *int64 + rehash *bool +} + +func NewCardinalityAggregation() *CardinalityAggregation { + return &CardinalityAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation { + a.field = field + return a +} + +func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation { + a.script = script + return a +} + +func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation { + a.format = format + return a +} + +func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation { + a.meta = metaData + return a +} + +func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation { + a.precisionThreshold = &threshold + return a +} + +func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation { + a.rehash = &rehash + return a +} + +func (a *CardinalityAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "author_count" : { + // "cardinality" : { "field" : "author" } + // } + // } + // } + // This method returns only the "cardinality" : { "field" : "author" } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["cardinality"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + if a.precisionThreshold != nil { + opts["precision_threshold"] = *a.precisionThreshold + } + if a.rehash != nil { + opts["rehash"] = *a.rehash + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go new file mode 100644 index 000000000..bccfa7aae --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestCardinalityAggregation(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestCardinalityAggregationWithOptions(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100).Rehash(true)
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash","precision_threshold":100,"rehash":true}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestCardinalityAggregationWithFormat(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash").Format("00000")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash","format":"00000"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestCardinalityAggregationWithMetaData(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash").Meta(map[string]interface{}{"name": "Oliver"})
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash"},"meta":{"name":"Oliver"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go
new file mode 100644
index 000000000..69447409c
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExtendedStatsAggregation is a multi-value metrics aggregation that
+// computes stats over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html +type ExtendedStatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewExtendedStatsAggregation() *ExtendedStatsAggregation { + return &ExtendedStatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation { + a.field = field + return a +} + +func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation { + a.script = script + return a +} + +func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation { + a.format = format + return a +} + +func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation { + a.meta = metaData + return a +} + +func (a *ExtendedStatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "extended_stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "extended_stats" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["extended_stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go new file mode 100644 index 000000000..4a80693cf --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
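Where `stats` stops at count, min, max, avg and sum, `extended_stats` additionally reports sum of squares, variance and standard deviation in the response. The request side is a one-liner; a sketch under the same assumed import path:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic" // assumed vendored import path
)

func main() {
	agg := elastic.NewExtendedStatsAggregation().Field("grade")

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"extended_stats":{"field":"grade"}}
}
```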
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestExtendedStatsAggregation(t *testing.T) { + agg := NewExtendedStatsAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"extended_stats":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestExtendedStatsAggregationWithFormat(t *testing.T) { + agg := NewExtendedStatsAggregation().Field("grade").Format("000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"extended_stats":{"field":"grade","format":"000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go new file mode 100644 index 000000000..647ba5139 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go @@ -0,0 +1,105 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoBoundsAggregation is a metric aggregation that computes the +// bounding box containing all geo_point values for a field. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html +type GeoBoundsAggregation struct { + field string + script *Script + wrapLongitude *bool + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGeoBoundsAggregation() *GeoBoundsAggregation { + return &GeoBoundsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation { + a.field = field + return a +} + +func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation { + a.script = script + return a +} + +func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation { + a.wrapLongitude = &wrapLongitude + return a +} + +func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation { + a.meta = metaData + return a +} + +func (a *GeoBoundsAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "business_type" : "shop" } + // }, + // "aggs" : { + // "viewport" : { + // "geo_bounds" : { + // "field" : "location" + // "wrap_longitude" : "true" + // } + // } + // } + // } + // + // This method returns only the { "geo_bounds" : { ... } } part. 
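`wrap_longitude` controls whether the reported bounding box may cross the international date line; the builder keeps it tri-state via a `*bool` so the key is only emitted once set. A sketch before the serialization code resumes:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic" // assumed vendored import path
)

func main() {
	// Allow the viewport to overlap the date line when that yields a tighter box.
	agg := elastic.NewGeoBoundsAggregation().Field("location").WrapLongitude(true)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"geo_bounds":{"field":"location","wrap_longitude":true}}
}
```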
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_bounds"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.wrapLongitude != nil { + opts["wrap_longitude"] = *a.wrapLongitude + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go new file mode 100644 index 000000000..3096b8ee5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoBoundsAggregation(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundsAggregationWithWrapLongitude(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location").WrapLongitude(true) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location","wrap_longitude":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundsAggregationWithMetaData(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max.go new file mode 100644 index 000000000..334cff020 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
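`max` follows the same single-value pattern; `Format` only affects how the computed value is rendered in the response, not the computation itself. A sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic" // assumed vendored import path
)

func main() {
	agg := elastic.NewMaxAggregation().Field("price").Format("00000.00")

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"max":{"field":"price","format":"00000.00"}}
}
```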
+ +package elastic + +// MaxAggregation is a single-value metrics aggregation that keeps track and +// returns the maximum value among the numeric values extracted from +// the aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html +type MaxAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMaxAggregation() *MaxAggregation { + return &MaxAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MaxAggregation) Field(field string) *MaxAggregation { + a.field = field + return a +} + +func (a *MaxAggregation) Script(script *Script) *MaxAggregation { + a.script = script + return a +} + +func (a *MaxAggregation) Format(format string) *MaxAggregation { + a.format = format + return a +} + +func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation { + a.meta = metaData + return a +} +func (a *MaxAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "max_price" : { "max" : { "field" : "price" } } + // } + // } + // This method returns only the { "max" : { "field" : "price" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["max"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max_test.go new file mode 100644 index 000000000..b5da00c19 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
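Throughout these builders, optional numeric and boolean settings are stored as pointers (`*int`, `*bool`) so that "unset" is distinguishable from a legitimate zero; `Source()` only emits a key once the setter has actually run. A sketch of the difference:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic" // assumed vendored import path
)

func main() {
	unset := elastic.NewTermsAggregation().Field("tag")
	zeroed := elastic.NewTermsAggregation().Field("tag").Size(0) // explicit zero survives

	for _, agg := range []*elastic.TermsAggregation{unset, zeroed} {
		src, err := agg.Source()
		if err != nil {
			panic(err)
		}
		data, err := json.Marshal(src)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(data))
	}
	// Output:
	// {"terms":{"field":"tag"}}
	// {"terms":{"field":"tag","size":0}}
}
```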
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMaxAggregation(t *testing.T) { + agg := NewMaxAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMaxAggregationWithFormat(t *testing.T) { + agg := NewMaxAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMaxAggregationWithMetaData(t *testing.T) { + agg := NewMaxAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min.go new file mode 100644 index 000000000..f9e21f7a8 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min.go @@ -0,0 +1,100 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MinAggregation is a single-value metrics aggregation that keeps track and +// returns the minimum value among numeric values extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by a +// provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html +type MinAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMinAggregation() *MinAggregation { + return &MinAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MinAggregation) Field(field string) *MinAggregation { + a.field = field + return a +} + +func (a *MinAggregation) Script(script *Script) *MinAggregation { + a.script = script + return a +} + +func (a *MinAggregation) Format(format string) *MinAggregation { + a.format = format + return a +} + +func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation { + a.meta = metaData + return a +} + +func (a *MinAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "min_price" : { "min" : { "field" : "price" } } + // } + // } + // This method returns only the { "min" : { "field" : "price" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["min"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min_test.go new file mode 100644 index 000000000..170650667 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMinAggregation(t *testing.T) { + agg := NewMinAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMinAggregationWithFormat(t *testing.T) { + agg := NewMinAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMinAggregationWithMetaData(t *testing.T) { + agg := NewMinAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"min":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go new file mode 100644 index 000000000..c0b3aa663 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PercentileRanksAggregation +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html +type PercentileRanksAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + values []float64 + compression *float64 + estimator string +} + +func NewPercentileRanksAggregation() *PercentileRanksAggregation { + return &PercentileRanksAggregation{ + subAggregations: make(map[string]Aggregation), + values: make([]float64, 0), + } +} + +func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation { + a.field = field + return a +} + +func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation { + a.script = script + return a +} + +func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation { + a.format = format + return a +} + +func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation { + a.meta = metaData + return a +} + +func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation { + a.values = append(a.values, values...) + return a +} + +func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation { + a.compression = &compression + return a +} + +func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation { + a.estimator = estimator + return a +} + +func (a *PercentileRanksAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentile_ranks" : { + // "field" : "load_time" + // "values" : [15, 30] + // } + // } + // } + // } + // This method returns only the + // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } } + // part. 
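`values` are the concrete measurements whose percentile ranks should be reported, and `compression` trades accuracy for memory in Elasticsearch's t-digest. A sketch; the compression value is purely illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic" // assumed vendored import path
)

func main() {
	// Ask: what fraction of documents load within 15ms, and within 30ms?
	agg := elastic.NewPercentileRanksAggregation().
		Field("load_time").
		Values(15, 30).
		Compression(200) // illustrative accuracy/memory trade-off

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// {"percentile_ranks":{"compression":200,"field":"load_time","values":[15,30]}}
}
```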
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentile_ranks"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.values) > 0 { + opts["values"] = a.values + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go new file mode 100644 index 000000000..df4b7c4a3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPercentileRanksAggregation(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithCustomValues(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Values(15, 30) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time","values":[15,30]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithFormat(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Format("000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time","format":"000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithMetaData(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"percentile_ranks":{"field":"load_time"}}` + if got != expected { + 
t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles.go new file mode 100644 index 000000000..b1695ebb3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles.go @@ -0,0 +1,130 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PercentilesAggregation +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html +type PercentilesAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + percentiles []float64 + compression *float64 + estimator string +} + +func NewPercentilesAggregation() *PercentilesAggregation { + return &PercentilesAggregation{ + subAggregations: make(map[string]Aggregation), + percentiles: make([]float64, 0), + } +} + +func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation { + a.field = field + return a +} + +func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation { + a.script = script + return a +} + +func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation { + a.format = format + return a +} + +func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation { + a.meta = metaData + return a +} + +func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation { + a.percentiles = append(a.percentiles, percentiles...) + return a +} + +func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation { + a.compression = &compression + return a +} + +func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation { + a.estimator = estimator + return a +} + +func (a *PercentilesAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentiles" : { + // "field" : "load_time" + // } + // } + // } + // } + // This method returns only the + // { "percentiles" : { "field" : "load_time" } } + // part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentiles"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.percentiles) > 0 { + opts["percents"] = a.percentiles + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go new file mode 100644 index 000000000..da2d2055e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPercentilesAggregation(t *testing.T) { + agg := NewPercentilesAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithCustomPercents(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Percentiles(0.2, 0.5, 0.9) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price","percents":[0.2,0.5,0.9]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithFormat(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithMetaData(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"percentiles":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff 
--git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats.go new file mode 100644 index 000000000..42da9c854 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// StatsAggregation is a multi-value metrics aggregation that computes stats +// over numeric values extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html +type StatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewStatsAggregation() *StatsAggregation { + return &StatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *StatsAggregation) Field(field string) *StatsAggregation { + a.field = field + return a +} + +func (a *StatsAggregation) Script(script *Script) *StatsAggregation { + a.script = script + return a +} + +func (a *StatsAggregation) Format(format string) *StatsAggregation { + a.format = format + return a +} + +func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation { + a.meta = metaData + return a +} + +func (a *StatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "stats" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats_test.go new file mode 100644 index 000000000..0ea0b175d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
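`stats` is most often nested under a bucket aggregation so each bucket reports its own count, min, max, avg and sum. A sketch combining it with the terms aggregation defined earlier in this patch:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic" // assumed vendored import path
)

func main() {
	// Per-force grade statistics: one stats result per "force" bucket.
	agg := elastic.NewTermsAggregation().
		Field("force").
		SubAggregation("grade_stats", elastic.NewStatsAggregation().Field("grade"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// {"aggregations":{"grade_stats":{"stats":{"field":"grade"}}},"terms":{"field":"force"}}
}
```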
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestStatsAggregation(t *testing.T) { + agg := NewStatsAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"stats":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestStatsAggregationWithFormat(t *testing.T) { + agg := NewStatsAggregation().Field("grade").Format("0000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"stats":{"field":"grade","format":"0000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestStatsAggregationWithMetaData(t *testing.T) { + agg := NewStatsAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"stats":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum.go new file mode 100644 index 000000000..6f783e7e1 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SumAggregation is a single-value metrics aggregation that sums up +// numeric values that are extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html +type SumAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewSumAggregation() *SumAggregation { + return &SumAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SumAggregation) Field(field string) *SumAggregation { + a.field = field + return a +} + +func (a *SumAggregation) Script(script *Script) *SumAggregation { + a.script = script + return a +} + +func (a *SumAggregation) Format(format string) *SumAggregation { + a.format = format + return a +} + +func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation { + a.meta = metaData + return a +} + +func (a *SumAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "intraday_return" : { "sum" : { "field" : "change" } } + // } + // } + // This method returns only the { "sum" : { "field" : "change" } } part. 
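Like the other metrics here, `sum` can draw its values from a script instead of a stored field. A hedged sketch: the `NewScript` constructor is assumed from elsewhere in this package (it is not part of this excerpt), and the exact script serialization depends on `Script.Source()`:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic" // assumed vendored import path
)

func main() {
	// Sum a derived value; NewScript is assumed to exist elsewhere in this package.
	agg := elastic.NewSumAggregation().
		Script(elastic.NewScript("doc['change'].value * 2"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // exact output depends on how Script.Source() serializes
}
```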
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sum"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum_test.go new file mode 100644 index 000000000..737808931 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSumAggregation(t *testing.T) { + agg := NewSumAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSumAggregationWithFormat(t *testing.T) { + agg := NewSumAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSumAggregationWithMetaData(t *testing.T) { + agg := NewSumAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"sum":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits.go new file mode 100644 index 000000000..c017abb98 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits.go @@ -0,0 +1,143 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TopHitsAggregation keeps track of the most relevant document +// being aggregated. This aggregator is intended to be used as a +// sub aggregator, so that the top matching documents +// can be aggregated per bucket. 
+//
+// It can effectively be used to group result sets by certain fields via
+// a bucket aggregator. One or more bucket aggregators determine the
+// properties by which a result set is sliced.
+//
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+type TopHitsAggregation struct {
+	searchSource *SearchSource
+}
+
+func NewTopHitsAggregation() *TopHitsAggregation {
+	return &TopHitsAggregation{
+		searchSource: NewSearchSource(),
+	}
+}
+
+func (a *TopHitsAggregation) From(from int) *TopHitsAggregation {
+	a.searchSource = a.searchSource.From(from)
+	return a
+}
+
+func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Size(size)
+	return a
+}
+
+func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.TrackScores(trackScores)
+	return a
+}
+
+func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Explain(explain)
+	return a
+}
+
+func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Version(version)
+	return a
+}
+
+func (a *TopHitsAggregation) NoFields() *TopHitsAggregation {
+	a.searchSource = a.searchSource.NoFields()
+	return a
+}
+
+func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FetchSource(fetchSource)
+	return a
+}
+
+func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
+	return a
+}
+
+func (a *TopHitsAggregation) FieldDataFields(fieldDataFields ...string) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FieldDataFields(fieldDataFields...)
+	return a
+}
+
+func (a *TopHitsAggregation) FieldDataField(fieldDataField string) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FieldDataField(fieldDataField)
+	return a
+}
+
+func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation {
+	a.searchSource = a.searchSource.ScriptFields(scriptFields...)
+	return a
+}
+
+func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation {
+	a.searchSource = a.searchSource.ScriptField(scriptField)
+	return a
+}
+
+func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Sort(field, ascending)
+	return a
+}
+
+func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation {
+	a.searchSource = a.searchSource.SortWithInfo(info)
+	return a
+}
+
+func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation {
+	a.searchSource = a.searchSource.SortBy(sorter...)
+	return a
+}
+
+func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Highlight(highlight)
+	return a
+}
+
+func (a *TopHitsAggregation) Highlighter() *Highlight {
+	return a.searchSource.Highlighter()
+}
+
+func (a *TopHitsAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//   "aggs": {
+	//     "top_tag_hits": {
+	//       "top_hits": {
+	//         "sort": [
+	//           {
+	//             "last_activity_date": {
+	//               "order": "desc"
+	//             }
+	//           }
+	//         ],
+	//         "_source": {
+	//           "include": [
+	//             "title"
+	//           ]
+	//         },
+	//         "size" : 1
+	//       }
+	//     }
+	//   }
+	// }
+	// This method returns only the { "top_hits" : { ... } } part.
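+	//
+	// Because top_hits is meant to run as a sub-aggregation, a typical
+	// request nests it under a bucket aggregation. As a sketch only (the
+	// client, index, and field names below are hypothetical):
+	//
+	//	topHits := NewTopHitsAggregation().Size(1).
+	//		Sort("last_activity_date", false)
+	//	byTag := NewTermsAggregation().Field("tags").
+	//		SubAggregation("top_tag_hits", topHits)
+	//	res, err := client.Search().Index("questions").Size(0).
+	//		Aggregation("by_tag", byTag).Do()
+	//	if err == nil {
+	//		if terms, found := res.Aggregations.Terms("by_tag"); found {
+	//			for _, bucket := range terms.Buckets {
+	//				if hits, ok := bucket.TopHits("top_tag_hits"); ok && hits.Hits != nil {
+	//					_ = hits.Hits.Hits // best-matching documents per bucket
+	//				}
+	//			}
+	//		}
+	//	}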
+ + source := make(map[string]interface{}) + src, err := a.searchSource.Source() + if err != nil { + return nil, err + } + source["top_hits"] = src + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go new file mode 100644 index 000000000..2634a22b6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go @@ -0,0 +1,31 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTopHitsAggregation(t *testing.T) { + fsc := NewFetchSourceContext(true).Include("title") + agg := NewTopHitsAggregation(). + Sort("last_activity_date", false). + FetchSourceContext(fsc). + Size(1) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"top_hits":{"_source":{"excludes":[],"includes":["title"]},"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count.go new file mode 100644 index 000000000..b2e3e8241 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count.go @@ -0,0 +1,102 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ValueCountAggregation is a single-value metrics aggregation that counts +// the number of values that are extracted from the aggregated documents. +// These values can be extracted either from specific fields in the documents, +// or be generated by a provided script. Typically, this aggregator will be +// used in conjunction with other single-value aggregations. +// For example, when computing the avg one might be interested in the +// number of values the average is computed over. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html +type ValueCountAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewValueCountAggregation() *ValueCountAggregation { + return &ValueCountAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation { + a.field = field + return a +} + +func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation { + a.script = script + return a +} + +func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation { + a.format = format + return a +} + +func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation { + a.meta = metaData + return a +} + +func (a *ValueCountAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_count" : { "value_count" : { "field" : "grade" } } + // } + // } + // This method returns only the { "value_count" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["value_count"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go new file mode 100644 index 000000000..eee189b51 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go @@ -0,0 +1,63 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestValueCountAggregation(t *testing.T) { + agg := NewValueCountAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"value_count":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestValueCountAggregationWithFormat(t *testing.T) { + // Format comes with 1.5.0+ + agg := NewValueCountAggregation().Field("grade").Format("0000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"value_count":{"field":"grade","format":"0000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestValueCountAggregationWithMetaData(t *testing.T) { + agg := NewValueCountAggregation().Field("grade") + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"value_count":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go new file mode 100644 index 000000000..5cd93d5cc --- /dev/null +++ 
b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgBucketAggregation is a sibling pipeline aggregation which calculates +// the (mean) average value of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html +type AvgBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation. +func NewAvgBucketAggregation() *AvgBucketAggregation { + return &AvgBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *AvgBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *AvgBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["avg_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go new file mode 100644 index 000000000..0e6509ecb --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestAvgBucketAggregation(t *testing.T) { + agg := NewAvgBucketAggregation().BucketsPath("the_sum").GapPolicy("skip") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go new file mode 100644 index 000000000..44d6bc624 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go @@ -0,0 +1,132 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketScriptAggregation is a parent pipeline aggregation which executes +// a script which can perform per bucket computations on specified metrics +// in the parent multi-bucket aggregation. The specified metric must be +// numeric and the script must return a numeric value. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html +type BucketScriptAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation. 
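+//
+// A usage sketch (hypothetical names; the t-shirts example mirrors the
+// Elasticsearch reference docs): compute t-shirt sales as a percentage of
+// total sales per bucket of an enclosing histogram:
+//
+//	pct := NewBucketScriptAggregation().
+//		AddBucketsPath("tShirtSales", "t-shirts>sales").
+//		AddBucketsPath("totalSales", "total_sales").
+//		Script(NewScript("tShirtSales / totalSales * 100"))
+//	// Attach with histo.SubAggregation("t-shirt-percentage", pct), where
+//	// histo is the enclosing date_histogram aggregation.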
+func NewBucketScriptAggregation() *BucketScriptAggregation { + return &BucketScriptAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *BucketScriptAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketScriptAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. +func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketScriptAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_script"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go new file mode 100644 index 000000000..7f4d966d0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go @@ -0,0 +1,30 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBucketScriptAggregation(t *testing.T) { + agg := NewBucketScriptAggregation(). + AddBucketsPath("tShirtSales", "t-shirts>sales"). + AddBucketsPath("totalSales", "total_sales"). + Script(NewScript("tShirtSales / totalSales * 100")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts\u003esales","totalSales":"total_sales"},"script":"tShirtSales / totalSales * 100"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go new file mode 100644 index 000000000..ce17ec1f6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go @@ -0,0 +1,134 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketSelectorAggregation is a parent pipeline aggregation which +// determines whether the current bucket will be retained in the parent +// multi-bucket aggregation. The specific metric must be numeric and +// the script must return a boolean value. If the script language is +// expression then a numeric return value is permitted. In this case 0.0 +// will be evaluated as false and all other values will evaluate to true. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-selector-aggregation.html +type BucketSelectorAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation. +func NewBucketSelectorAggregation() *BucketSelectorAggregation { + return &BucketSelectorAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. 
+func (a *BucketSelectorAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketSelectorAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketSelectorAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_selector"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go new file mode 100644 index 000000000..d4e0206de --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBucketSelectorAggregation(t *testing.T) { + agg := NewBucketSelectorAggregation(). + AddBucketsPath("totalSales", "total_sales"). 
+ Script(NewScript("totalSales >= 1000")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":"totalSales \u003e= 1000"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go new file mode 100644 index 000000000..018eb918f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go @@ -0,0 +1,90 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CumulativeSumAggregation is a parent pipeline aggregation which calculates +// the cumulative sum of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html +type CumulativeSumAggregation struct { + format string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation. +func NewCumulativeSumAggregation() *CumulativeSumAggregation { + return &CumulativeSumAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation { + a.format = format + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *CumulativeSumAggregation) SubAggregation(name string, subAggregation Aggregation) *CumulativeSumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *CumulativeSumAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["cumulative_sum"] = params + + if a.format != "" { + params["format"] = a.format + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go new file mode 100644 index 000000000..a4023d84e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCumulativeSumAggregation(t *testing.T) { + agg := NewCumulativeSumAggregation().BucketsPath("sales") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"cumulative_sum":{"buckets_path":"sales"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative.go new file mode 100644 index 000000000..66611f46e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative.go @@ -0,0 +1,124 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DerivativeAggregation is a parent pipeline aggregation which calculates +// the derivative of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html +type DerivativeAggregation struct { + format string + gapPolicy string + unit string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewDerivativeAggregation creates and initializes a new DerivativeAggregation. 
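+//
+// A usage sketch (hypothetical field names): take the month-over-month
+// derivative of a sum that lives inside a date_histogram:
+//
+//	histo := NewDateHistogramAggregation().Field("date").Interval("month").
+//		SubAggregation("sales", NewSumAggregation().Field("price")).
+//		SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales"))
+//	// Each histogram bucket (except the first) then carries a
+//	// "sales_deriv" value.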
+func NewDerivativeAggregation() *DerivativeAggregation { + return &DerivativeAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation { + a.gapPolicy = "skip" + return a +} + +// Unit sets the unit provided, e.g. "1d" or "1y". +// It is only useful when calculating the derivative using a date_histogram. +func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation { + a.unit = unit + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *DerivativeAggregation) SubAggregation(name string, subAggregation Aggregation) *DerivativeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *DerivativeAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["derivative"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.unit != "" { + params["unit"] = a.unit + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go new file mode 100644 index 000000000..1d2ec2d38 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDerivativeAggregation(t *testing.T) { + agg := NewDerivativeAggregation().BucketsPath("sales") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"derivative":{"buckets_path":"sales"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go new file mode 100644 index 000000000..da6f9ef36 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go @@ -0,0 +1,114 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MaxBucketAggregation is a sibling pipeline aggregation which identifies +// the bucket(s) with the maximum value of a specified metric in a sibling +// aggregation and outputs both the value and the key(s) of the bucket(s). +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html +type MaxBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation. +func NewMaxBucketAggregation() *MaxBucketAggregation { + return &MaxBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MaxBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+	return a
+}
+
+func (a *MaxBucketAggregation) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["max_bucket"] = params
+
+	if a.format != "" {
+		params["format"] = a.format
+	}
+	if a.gapPolicy != "" {
+		params["gap_policy"] = a.gapPolicy
+	}
+
+	// Add buckets paths
+	switch len(a.bucketsPaths) {
+	case 0:
+	case 1:
+		params["buckets_path"] = a.bucketsPaths[0]
+	default:
+		params["buckets_path"] = a.bucketsPaths
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			src, err := aggregate.Source()
+			if err != nil {
+				return nil, err
+			}
+			aggsMap[name] = src
+		}
+	}
+
+	// Add Meta data if available
+	if len(a.meta) > 0 {
+		source["meta"] = a.meta
+	}
+
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go
new file mode 100644
index 000000000..8bdde8fcd
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestMaxBucketAggregation(t *testing.T) {
+	agg := NewMaxBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"max_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go
new file mode 100644
index 000000000..325f00f03
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go
@@ -0,0 +1,114 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinBucketAggregation is a sibling pipeline aggregation which identifies
+// the bucket(s) with the minimum value of a specified metric in a sibling
+// aggregation and outputs both the value and the key(s) of the bucket(s).
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html
+type MinBucketAggregation struct {
+	format    string
+	gapPolicy string
+
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	bucketsPaths    []string
+}
+
+// NewMinBucketAggregation creates and initializes a new MinBucketAggregation.
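+//
+// A usage sketch (hypothetical names): find the month with the lowest sales.
+// As a sibling pipeline aggregation it sits next to, not inside, the
+// multi-bucket aggregation it reads, and its buckets_path drills into that
+// sibling with ">":
+//
+//	histo := NewDateHistogramAggregation().Field("date").Interval("month").
+//		SubAggregation("sales", NewSumAggregation().Field("price"))
+//	minSales := NewMinBucketAggregation().BucketsPath("sales_per_month>sales")
+//	res, err := client.Search().Index("sales").Size(0).
+//		Aggregation("sales_per_month", histo).
+//		Aggregation("min_monthly_sales", minSales).Do()
+//	// The result is then read back via the Aggregations helpers
+//	// (e.g. res.Aggregations.MinBucket("min_monthly_sales")).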
+func NewMinBucketAggregation() *MinBucketAggregation { + return &MinBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MinBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MinBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MinBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["min_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go new file mode 100644 index 000000000..86fc9cd7f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMinBucketAggregation(t *testing.T) { + agg := NewMinBucketAggregation().BucketsPath("sales_per_month>sales").GapPolicy("skip") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min_bucket":{"buckets_path":"sales_per_month\u003esales","gap_policy":"skip"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go new file mode 100644 index 000000000..021144ddc --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go @@ -0,0 +1,393 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MovAvgAggregation operates on a series of data. It will slide a window +// across the data and emit the average value of that window. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html +type MovAvgAggregation struct { + format string + gapPolicy string + model MovAvgModel + window *int + predict *int + minimize *bool + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMovAvgAggregation creates and initializes a new MovAvgAggregation. +func NewMovAvgAggregation() *MovAvgAggregation { + return &MovAvgAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation { + a.gapPolicy = "skip" + return a +} + +// Model is used to define what type of moving average you want to use +// in the series. +func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation { + a.model = model + return a +} + +// Window sets the window size for the moving average. This window will +// "slide" across the series, and the values inside that window will +// be used to calculate the moving avg value. +func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation { + a.window = &window + return a +} + +// Predict sets the number of predictions that should be returned. +// Each prediction will be spaced at the intervals in the histogram. +// E.g. a predict of 2 will return two new buckets at the end of the +// histogram with the predicted values. 
+func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation { + a.predict = &numPredictions + return a +} + +// Minimize determines if the model should be fit to the data using a +// cost minimizing algorithm. +func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation { + a.minimize = &minimize + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MovAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *MovAvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MovAvgAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["moving_avg"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.model != nil { + params["model"] = a.model.Name() + settings := a.model.Settings() + if len(settings) > 0 { + params["settings"] = settings + } + } + if a.window != nil { + params["window"] = *a.window + } + if a.predict != nil { + params["predict"] = *a.predict + } + if a.minimize != nil { + params["minimize"] = *a.minimize + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Models for moving averages -- +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_models + +// MovAvgModel specifies the model to use with the MovAvgAggregation. +type MovAvgModel interface { + Name() string + Settings() map[string]interface{} +} + +// -- EWMA -- + +// EWMAMovAvgModel calculates an exponentially weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted +type EWMAMovAvgModel struct { + alpha *float64 +} + +// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel. +func NewEWMAMovAvgModel() *EWMAMovAvgModel { + return &EWMAMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel { + m.alpha = &alpha + return m +} + +// Name of the model. +func (m *EWMAMovAvgModel) Name() string { + return "ewma" +} + +// Settings of the model. 
+func (m *EWMAMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + return settings +} + +// -- Holt linear -- + +// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear +type HoltLinearMovAvgModel struct { + alpha *float64 + beta *float64 +} + +// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel. +func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel { + return &HoltLinearMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel { + m.alpha = &alpha + return m +} + +// Beta is equivalent to Alpha but controls the smoothing of the trend +// instead of the data. +func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel { + m.beta = &beta + return m +} + +// Name of the model. +func (m *HoltLinearMovAvgModel) Name() string { + return "holt" +} + +// Settings of the model. +func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + if m.beta != nil { + settings["beta"] = *m.beta + } + return settings +} + +// -- Holt Winters -- + +// HoltWintersMovAvgModel calculates a triple exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters +type HoltWintersMovAvgModel struct { + alpha *float64 + beta *float64 + gamma *float64 + period *int + seasonalityType string + pad *bool +} + +// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel. +func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel { + return &HoltWintersMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel { + m.alpha = &alpha + return m +} + +// Beta is equivalent to Alpha but controls the smoothing of the trend +// instead of the data. +func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel { + m.beta = &beta + return m +} + +func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel { + m.gamma = &gamma + return m +} + +func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel { + m.period = &period + return m +} + +func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel { + m.seasonalityType = typ + return m +} + +func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel { + m.pad = &pad + return m +} + +// Name of the model. +func (m *HoltWintersMovAvgModel) Name() string { + return "holt_winters" +} + +// Settings of the model. 
+func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + if m.beta != nil { + settings["beta"] = *m.beta + } + if m.gamma != nil { + settings["gamma"] = *m.gamma + } + if m.period != nil { + settings["period"] = *m.period + } + if m.pad != nil { + settings["pad"] = *m.pad + } + if m.seasonalityType != "" { + settings["type"] = m.seasonalityType + } + return settings +} + +// -- Linear -- + +// LinearMovAvgModel calculates a linearly weighted moving average, such +// that older values are linearly less important. "Time" is determined +// by position in collection. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_linear +type LinearMovAvgModel struct { +} + +// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel. +func NewLinearMovAvgModel() *LinearMovAvgModel { + return &LinearMovAvgModel{} +} + +// Name of the model. +func (m *LinearMovAvgModel) Name() string { + return "linear" +} + +// Settings of the model. +func (m *LinearMovAvgModel) Settings() map[string]interface{} { + return nil +} + +// -- Simple -- + +// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_simple +type SimpleMovAvgModel struct { +} + +// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel. +func NewSimpleMovAvgModel() *SimpleMovAvgModel { + return &SimpleMovAvgModel{} +} + +// Name of the model. +func (m *SimpleMovAvgModel) Name() string { + return "simple" +} + +// Settings of the model. +func (m *SimpleMovAvgModel) Settings() map[string]interface{} { + return nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go new file mode 100644 index 000000000..e17c1c0a0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go @@ -0,0 +1,132 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMovAvgAggregation(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithSimpleModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewSimpleMovAvgModel()) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"simple","window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithLinearModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewLinearMovAvgModel()) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"linear","window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithEWMAModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewEWMAMovAvgModel().Alpha(0.5)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"ewma","settings":{"alpha":0.5},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithHoltLinearModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30). + Model(NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"holt","settings":{"alpha":0.5,"beta":0.4},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithHoltWintersModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Predict(10).Minimize(true). 
+ Model(NewHoltWintersMovAvgModel().Alpha(0.5).Beta(0.4).Gamma(0.3).Period(7).Pad(true)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","minimize":true,"model":"holt_winters","predict":10,"settings":{"alpha":0.5,"beta":0.4,"gamma":0.3,"pad":true,"period":7},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithSubAggs(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum") + agg = agg.SubAggregation("avg_sum", NewAvgAggregation().Field("height")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_sum":{"avg":{"field":"height"}}},"moving_avg":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go new file mode 100644 index 000000000..db81d3cf4 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go @@ -0,0 +1,124 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SerialDiffAggregation implements serial differencing. +// Serial differencing is a technique where values in a time series are +// subtracted from itself at different time lags or periods. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html +type SerialDiffAggregation struct { + format string + gapPolicy string + lag *int + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation. +func NewSerialDiffAggregation() *SerialDiffAggregation { + return &SerialDiffAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation { + a.gapPolicy = "skip" + return a +} + +// Lag specifies the historical bucket to subtract from the current value. +// E.g. a lag of 7 will subtract the current value from the value 7 buckets +// ago. Lag must be a positive, non-zero integer. 
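+// In other words, the reported value is the current bucket's value minus
+// the value from lag buckets earlier. A sketch of a weekly difference over
+// daily buckets ("the_sum" is an assumed metric path, as in the tests):
+//
+//  NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7)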
+func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation { + a.lag = &lag + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SerialDiffAggregation) SubAggregation(name string, subAggregation Aggregation) *SerialDiffAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *SerialDiffAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["serial_diff"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.lag != nil { + params["lag"] = *a.lag + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go new file mode 100644 index 000000000..17e512c5d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSerialDiffAggregation(t *testing.T) { + agg := NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"serial_diff":{"buckets_path":"the_sum","lag":7}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go new file mode 100644 index 000000000..16ef64986 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// SumBucketAggregation is a sibling pipeline aggregation which calculates +// the sum across all buckets of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html +type SumBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSumBucketAggregation creates and initializes a new SumBucketAggregation. +func NewSumBucketAggregation() *SumBucketAggregation { + return &SumBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SumBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *SumBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
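+ // Successive calls append to any previously configured paths; Source()
+ // below emits a single path as a JSON string and multiple paths as an array.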
+ return a +} + +func (a *SumBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["sum_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go new file mode 100644 index 000000000..a1c84026d --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSumBucketAggregation(t *testing.T) { + agg := NewSumBucketAggregation().BucketsPath("the_sum") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum_bucket":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_test.go new file mode 100644 index 000000000..be6bbfc87 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_test.go @@ -0,0 +1,1000 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestAggsIntegrationAvgBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("avg_monthly_sales", NewAvgBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.AvgBucket("avg_monthly_sales") + if !found { + t.Fatal("expected avg_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected avg_monthly_sales aggregation") + } + if agg.Value == nil { + t.Fatal("expected avg_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(939.2); got != want { + t.Fatalf("expected avg_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationDerivative(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales")) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].Derivative("sales_deriv") + if found { + t.Fatal("expected no sales_deriv aggregation") + } + if d != nil { + t.Fatal("expected no sales_deriv aggregation") + } + + d, found = agg.Buckets[1].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value != 
nil { + t.Fatal("expected sales_deriv value == nil") + } + + d, found = agg.Buckets[2].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value != nil { + t.Fatal("expected sales_deriv value == nil") + } + + d, found = agg.Buckets[3].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(2348.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(-1658.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(-722.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMaxBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("max_monthly_sales", NewMaxBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.MaxBucket("max_monthly_sales") + if !found { + t.Fatal("expected max_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected max_monthly_sales aggregation") + } + if got, want := len(agg.Keys), 1; got != want { + t.Fatalf("expected len(max_monthly_sales.keys)=%d; got: %d", want, got) + } + if got, want := agg.Keys[0], "2015-04-01"; got != want { + t.Fatalf("expected max_monthly_sales.keys[0]=%v; got: %v", want, got) + } + if agg.Value == nil { + t.Fatal("expected max_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(2448); got != want { + t.Fatalf("expected max_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMinBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("min_monthly_sales", NewMinBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.MinBucket("min_monthly_sales") + if !found { + t.Fatal("expected min_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected min_monthly_sales aggregation") + } + if got, want := len(agg.Keys), 1; got != want { + t.Fatalf("expected len(min_monthly_sales.keys)=%d; got: %d", want, got) + } + if got, want := agg.Keys[0], "2015-06-01"; got != want { + t.Fatalf("expected min_monthly_sales.keys[0]=%v; got: %v", want, got) + } + if agg.Value == nil { + t.Fatal("expected min_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(68); got != want { + t.Fatalf("expected min_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationSumBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("sum_monthly_sales", NewSumBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.SumBucket("sum_monthly_sales") + if !found { + t.Fatal("expected sum_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected sum_monthly_sales aggregation") + } + if agg.Value == nil { + t.Fatal("expected sum_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(4696.0); got != want { + t.Fatalf("expected sum_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMovAvg(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("the_sum", NewSumAggregation().Field("price")) + h = h.SubAggregation("the_movavg", NewMovAvgAggregation().BucketsPath("the_sum")) + builder = builder.Aggregation("my_date_histo", h) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("my_date_histo") + if !found { + t.Fatal("expected sum_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected sum_monthly_sales aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + d, found := agg.Buckets[0].MovAvg("the_movavg") + if found { + t.Fatal("expected no the_movavg aggregation") + } + if d != nil { + t.Fatal("expected no the_movavg aggregation") + } + + d, found = agg.Buckets[1].MovAvg("the_movavg") + if found { + t.Fatal("expected no the_movavg aggregation") + } + if d != nil { + t.Fatal("expected no the_movavg aggregation") + } + + d, found = agg.Buckets[2].MovAvg("the_movavg") + if !found { + t.Fatal("expected the_movavg aggregation") + } + if d == nil { + t.Fatal("expected the_movavg aggregation") + } + if d.Value == nil { + t.Fatal("expected the_movavg value") + } + if got, want := *d.Value, float64(1290.0); got != want { + t.Fatalf("expected %v buckets; got: %v", want, got) + } + + d, found = agg.Buckets[3].MovAvg("the_movavg") + if !found { + t.Fatal("expected the_movavg aggregation") + } + if d == nil { + t.Fatal("expected the_movavg aggregation") + } + if d.Value == nil { + t.Fatal("expected the_movavg value") + } + if got, want := *d.Value, float64(695.0); got != want { + t.Fatalf("expected %v buckets; got: %v", want, got) + } + + d, found = agg.Buckets[4].MovAvg("the_movavg") + if !found { + t.Fatal("expected the_movavg aggregation") + } + if d == nil { + t.Fatal("expected the_movavg aggregation") + } + if d.Value == nil { + t.Fatal("expected the_movavg value") + } + if got, want := *d.Value, float64(1279.3333333333333); got != want { + t.Fatalf("expected %v buckets; got: %v", want, got) + } + + d, found = agg.Buckets[5].MovAvg("the_movavg") + if !found { + t.Fatal("expected the_movavg aggregation") + } + if d == nil { + t.Fatal("expected the_movavg aggregation") + } + if d.Value == nil { + t.Fatal("expected the_movavg value") + } + if got, want := *d.Value, float64(1157.0); got != want { + t.Fatalf("expected %v buckets; got: %v", want, got) + } +} + +func TestAggsIntegrationCumulativeSum(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("cumulative_sales", NewCumulativeSumAggregation().BucketsPath("sales")) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1290.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[1].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1290.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[2].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1390.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[3].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(3838.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if 
got, want := *d.Value, float64(4628.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(4696.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationBucketScript(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("total_sales", NewSumAggregation().Field("price")) + appleFilter := NewFilterAggregation().Filter(NewTermQuery("manufacturer", "Apple")) + appleFilter = appleFilter.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("apple_sales", appleFilter) + h = h.SubAggregation("apple_percentage", + NewBucketScriptAggregation(). + GapPolicy("insert_zeros"). + AddBucketsPath("appleSales", "apple_sales>sales"). + AddBucketsPath("totalSales", "total_sales"). + Script(NewScript("appleSales / totalSales * 100"))) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatalf("%v (maybe scripting is disabled?)", err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(100.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; 
got: %v", want, got) + } + + d, found = agg.Buckets[1].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value != nil { + t.Fatal("expected apple_percentage value == nil") + } + + d, found = agg.Buckets[2].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[3].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(34.64052287581699); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationBucketSelector(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("total_sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("sales_bucket_filter", + NewBucketSelectorAggregation(). + AddBucketsPath("totalSales", "total_sales"). 
+ Script(NewScript("totalSales <= 100"))) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatalf("%v (maybe scripting is disabled?)", err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 2; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } +} + +func TestAggsIntegrationSerialDiff(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("the_diff", NewSerialDiffAggregation().BucketsPath("sales").Lag(1)) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].SerialDiff("the_diff") + if found { + t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = agg.Buckets[1].SerialDiff("the_diff") + if found { + t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = agg.Buckets[2].SerialDiff("the_diff") + if found { 
+ t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = agg.Buckets[3].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(2348.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(-1658.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(-722.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_test.go new file mode 100644 index 000000000..ef6ec2112 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_test.go @@ -0,0 +1,2996 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "strings" + "testing" + "time" +) + +func TestAggs(t *testing.T) { + //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndex(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + tweet1 := tweet{ + User: "olivere", + Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Image: "http://golang.org/doc/gopher/gophercolor.png", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", + Retweets: 0, + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + Location: "48.1189,11.4289", // lat,lon + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", + Retweets: 12, + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + Location: "47.7167,11.7167", // lat,lon + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + // Terms Aggregate by user name + globalAgg := NewGlobalAggregation() + usersAgg := 
NewTermsAggregation().Field("user").Size(10).OrderByCountDesc() + retweetsAgg := NewTermsAggregation().Field("retweets").Size(10) + avgRetweetsAgg := NewAvgAggregation().Field("retweets") + avgRetweetsWithMetaAgg := NewAvgAggregation().Field("retweetsMeta").Meta(map[string]interface{}{"meta": true}) + minRetweetsAgg := NewMinAggregation().Field("retweets") + maxRetweetsAgg := NewMaxAggregation().Field("retweets") + sumRetweetsAgg := NewSumAggregation().Field("retweets") + statsRetweetsAgg := NewStatsAggregation().Field("retweets") + extstatsRetweetsAgg := NewExtendedStatsAggregation().Field("retweets") + valueCountRetweetsAgg := NewValueCountAggregation().Field("retweets") + percentilesRetweetsAgg := NewPercentilesAggregation().Field("retweets") + percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75) + cardinalityAgg := NewCardinalityAggregation().Field("user") + significantTermsAgg := NewSignificantTermsAggregation().Field("message") + samplerAgg := NewSamplerAggregation().Field("user").SubAggregation("tagged_with", NewTermsAggregation().Field("tags")) + retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100) + retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100) + dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01") + missingTagsAgg := NewMissingAggregation().Field("tags") + retweetsHistoAgg := NewHistogramAggregation().Field("retweets").Interval(100) + dateHistoAgg := NewDateHistogramAggregation().Field("created").Interval("year") + retweetsFilterAgg := NewFilterAggregation().Filter( + NewRangeQuery("created").Gte("2012-01-01").Lte("2012-12-31")). 
+ SubAggregation("avgRetweetsSub", NewAvgAggregation().Field("retweets")) + queryFilterAgg := NewFilterAggregation().Filter(NewTermQuery("tags", "golang")) + topTagsHitsAgg := NewTopHitsAggregation().Sort("created", false).Size(5).FetchSource(true) + topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg) + geoBoundsAgg := NewGeoBoundsAggregation().Field("location") + + // Run query + builder := client.Search().Index(testIndexName).Query(all).Pretty(true) + builder = builder.Aggregation("global", globalAgg) + builder = builder.Aggregation("users", usersAgg) + builder = builder.Aggregation("retweets", retweetsAgg) + builder = builder.Aggregation("avgRetweets", avgRetweetsAgg) + if esversion >= "2.0" { + builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg) + } + builder = builder.Aggregation("minRetweets", minRetweetsAgg) + builder = builder.Aggregation("maxRetweets", maxRetweetsAgg) + builder = builder.Aggregation("sumRetweets", sumRetweetsAgg) + builder = builder.Aggregation("statsRetweets", statsRetweetsAgg) + builder = builder.Aggregation("extstatsRetweets", extstatsRetweetsAgg) + builder = builder.Aggregation("valueCountRetweets", valueCountRetweetsAgg) + builder = builder.Aggregation("percentilesRetweets", percentilesRetweetsAgg) + builder = builder.Aggregation("percentileRanksRetweets", percentileRanksRetweetsAgg) + builder = builder.Aggregation("usersCardinality", cardinalityAgg) + builder = builder.Aggregation("significantTerms", significantTermsAgg) + builder = builder.Aggregation("sample", samplerAgg) + builder = builder.Aggregation("retweetsRange", retweetsRangeAgg) + builder = builder.Aggregation("retweetsKeyedRange", retweetsKeyedRangeAgg) + builder = builder.Aggregation("dateRange", dateRangeAgg) + builder = builder.Aggregation("missingTags", missingTagsAgg) + builder = builder.Aggregation("retweetsHisto", retweetsHistoAgg) + builder = builder.Aggregation("dateHisto", dateHistoAgg) + builder = builder.Aggregation("retweetsFilter", retweetsFilterAgg) + builder = builder.Aggregation("queryFilter", queryFilterAgg) + builder = builder.Aggregation("top-tags", topTagsAgg) + builder = builder.Aggregation("viewport", geoBoundsAgg) + if esversion >= "1.4" { + countByUserAgg := NewFiltersAggregation().Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae")) + builder = builder.Aggregation("countByUser", countByUserAgg) + } + if esversion >= "2.0" { + // AvgBucket + dateHisto := NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("avgBucketDateHisto", dateHisto) + builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets")) + // MinBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("minBucketDateHisto", dateHisto) + builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets")) + // MaxBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("maxBucketDateHisto", dateHisto) + builder = 
builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets")) + // SumBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("sumBucketDateHisto", dateHisto) + builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets")) + // MovAvg + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) + builder = builder.Aggregation("movingAvgDateHisto", dateHisto) + } + searchResult, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got: %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got: %d", 3, len(searchResult.Hits.Hits)) + } + agg := searchResult.Aggregations + if agg == nil { + t.Fatalf("expected Aggregations != nil; got: nil") + } + + // Search for non-existent aggregate should return (nil, false) + unknownAgg, found := agg.Terms("no-such-aggregate") + if found { + t.Errorf("expected unknown aggregation to not be found; got: %v", found) + } + if unknownAgg != nil { + t.Errorf("expected unknown aggregation to return %v; got %v", nil, unknownAgg) + } + + // Global + globalAggRes, found := agg.Global("global") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if globalAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if globalAggRes.DocCount != 3 { + t.Errorf("expected DocCount = %d; got: %d", 3, globalAggRes.DocCount) + } + + // Search for existent aggregate (by name) should return (aggregate, true) + termsAggRes, found := agg.Terms("users") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if termsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(termsAggRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(termsAggRes.Buckets)) + } + if termsAggRes.Buckets[0].Key != "olivere" { + t.Errorf("expected %q; got: %q", "olivere", termsAggRes.Buckets[0].Key) + } + if termsAggRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, termsAggRes.Buckets[0].DocCount) + } + if termsAggRes.Buckets[1].Key != "sandrae" { + t.Errorf("expected %q; got: %q", "sandrae", termsAggRes.Buckets[1].Key) + } + if termsAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, termsAggRes.Buckets[1].DocCount) + } + + // A terms aggregate with keys that are not strings + retweetsAggRes, found := agg.Terms("retweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if retweetsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(retweetsAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(retweetsAggRes.Buckets)) + } + + if retweetsAggRes.Buckets[0].Key != float64(0) { + t.Errorf("expected %v; got: %v", float64(0), retweetsAggRes.Buckets[0].Key) + } + if got, err := retweetsAggRes.Buckets[0].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[0].Key) + } else if got != 0 { + t.Errorf("expected %d; got: 
%d", 0, got) + } + if retweetsAggRes.Buckets[0].KeyNumber != "0" { + t.Errorf("expected %q; got: %q", "0", retweetsAggRes.Buckets[0].KeyNumber) + } + if retweetsAggRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[0].DocCount) + } + + if retweetsAggRes.Buckets[1].Key != float64(12) { + t.Errorf("expected %v; got: %v", float64(12), retweetsAggRes.Buckets[1].Key) + } + if got, err := retweetsAggRes.Buckets[1].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[1].KeyNumber) + } else if got != 12 { + t.Errorf("expected %d; got: %d", 12, got) + } + if retweetsAggRes.Buckets[1].KeyNumber != "12" { + t.Errorf("expected %q; got: %q", "12", retweetsAggRes.Buckets[1].KeyNumber) + } + if retweetsAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[1].DocCount) + } + + if retweetsAggRes.Buckets[2].Key != float64(108) { + t.Errorf("expected %v; got: %v", float64(108), retweetsAggRes.Buckets[2].Key) + } + if got, err := retweetsAggRes.Buckets[2].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 108, retweetsAggRes.Buckets[2].KeyNumber) + } else if got != 108 { + t.Errorf("expected %d; got: %d", 108, got) + } + if retweetsAggRes.Buckets[2].KeyNumber != "108" { + t.Errorf("expected %q; got: %q", "108", retweetsAggRes.Buckets[2].KeyNumber) + } + if retweetsAggRes.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[2].DocCount) + } + + // avgRetweets + avgAggRes, found := agg.Avg("avgRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if avgAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if avgAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *avgAggRes.Value) + } + if *avgAggRes.Value != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *avgAggRes.Value) + } + + // avgRetweetsWithMeta + if esversion >= "2.0" { + avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if avgMetaAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if avgMetaAggRes.Meta == nil { + t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta) + } + metaDataValue, found := avgMetaAggRes.Meta["meta"] + if !found { + t.Fatalf("expected to return meta data key %q; got: %v", "meta", found) + } + if flag, ok := metaDataValue.(bool); !ok { + t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue) + } else if flag != true { + t.Fatalf("expected to return meta data key value %v; got: %v", true, flag) + } + } + + // minRetweets + minAggRes, found := agg.Min("minRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if minAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if minAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *minAggRes.Value) + } + if *minAggRes.Value != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *minAggRes.Value) + } + + // maxRetweets + maxAggRes, found := agg.Max("maxRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if maxAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if maxAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *maxAggRes.Value) + } + if *maxAggRes.Value != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *maxAggRes.Value) + } + + // sumRetweets + sumAggRes, found := agg.Sum("sumRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if sumAggRes == nil { + 
t.Fatalf("expected != nil; got: nil") + } + if sumAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *sumAggRes.Value) + } + if *sumAggRes.Value != 120.0 { + t.Errorf("expected %v; got: %v", 120.0, *sumAggRes.Value) + } + + // statsRetweets + statsAggRes, found := agg.Stats("statsRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if statsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if statsAggRes.Count != 3 { + t.Errorf("expected %d; got: %d", 3, statsAggRes.Count) + } + if statsAggRes.Min == nil { + t.Fatalf("expected != nil; got: %v", *statsAggRes.Min) + } + if *statsAggRes.Min != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *statsAggRes.Min) + } + if statsAggRes.Max == nil { + t.Fatalf("expected != nil; got: %v", *statsAggRes.Max) + } + if *statsAggRes.Max != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *statsAggRes.Max) + } + if statsAggRes.Avg == nil { + t.Fatalf("expected != nil; got: %v", *statsAggRes.Avg) + } + if *statsAggRes.Avg != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *statsAggRes.Avg) + } + if statsAggRes.Sum == nil { + t.Fatalf("expected != nil; got: %v", *statsAggRes.Sum) + } + if *statsAggRes.Sum != 120.0 { + t.Errorf("expected %v; got: %v", 120.0, *statsAggRes.Sum) + } + + // extstatsRetweets + extStatsAggRes, found := agg.ExtendedStats("extstatsRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if extStatsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if extStatsAggRes.Count != 3 { + t.Errorf("expected %d; got: %d", 3, extStatsAggRes.Count) + } + if extStatsAggRes.Min == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Min) + } + if *extStatsAggRes.Min != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *extStatsAggRes.Min) + } + if extStatsAggRes.Max == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Max) + } + if *extStatsAggRes.Max != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *extStatsAggRes.Max) + } + if extStatsAggRes.Avg == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Avg) + } + if *extStatsAggRes.Avg != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *extStatsAggRes.Avg) + } + if extStatsAggRes.Sum == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Sum) + } + if *extStatsAggRes.Sum != 120.0 { + t.Errorf("expected %v; got: %v", 120.0, *extStatsAggRes.Sum) + } + if extStatsAggRes.SumOfSquares == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.SumOfSquares) + } + if *extStatsAggRes.SumOfSquares != 11808.0 { + t.Errorf("expected %v; got: %v", 11808.0, *extStatsAggRes.SumOfSquares) + } + if extStatsAggRes.Variance == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Variance) + } + if *extStatsAggRes.Variance != 2336.0 { + t.Errorf("expected %v; got: %v", 2336.0, *extStatsAggRes.Variance) + } + if extStatsAggRes.StdDeviation == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.StdDeviation) + } + if *extStatsAggRes.StdDeviation != 48.33218389437829 { + t.Errorf("expected %v; got: %v", 48.33218389437829, *extStatsAggRes.StdDeviation) + } + + // valueCountRetweets + valueCountAggRes, found := agg.ValueCount("valueCountRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if valueCountAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if valueCountAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *valueCountAggRes.Value) + } + if *valueCountAggRes.Value != 3.0 { + t.Errorf("expected %v; got: %v", 3.0, 
*valueCountAggRes.Value)
+	}
+
+	// percentilesRetweets
+	percentilesAggRes, found := agg.Percentiles("percentilesRetweets")
+	if !found {
+		t.Errorf("expected %v; got: %v", true, found)
+	}
+	if percentilesAggRes == nil {
+		t.Fatalf("expected != nil; got: nil")
+	}
+	// ES 1.4.x returns 7: {"1.0":...}
+	// ES 1.5.0 returns 14: {"1.0":..., "1.0_as_string":...}
+	// So we're relaxing the test here.
+	if len(percentilesAggRes.Values) == 0 {
+		t.Errorf("expected at least %d value; got: %d\nValues are: %#v", 1, len(percentilesAggRes.Values), percentilesAggRes.Values)
+	}
+	if _, found := percentilesAggRes.Values["0.0"]; found {
+		t.Errorf("expected %v; got: %v", false, found)
+	}
+	if percentilesAggRes.Values["1.0"] != 0.24 {
+		t.Errorf("expected %v; got: %v", 0.24, percentilesAggRes.Values["1.0"])
+	}
+	if percentilesAggRes.Values["25.0"] != 6.0 {
+		t.Errorf("expected %v; got: %v", 6.0, percentilesAggRes.Values["25.0"])
+	}
+	if percentilesAggRes.Values["99.0"] != 106.08 {
+		t.Errorf("expected %v; got: %v", 106.08, percentilesAggRes.Values["99.0"])
+	}
+
+	// percentileRanksRetweets
+	percentileRanksAggRes, found := agg.PercentileRanks("percentileRanksRetweets")
+	if !found {
+		t.Errorf("expected %v; got: %v", true, found)
+	}
+	if percentileRanksAggRes == nil {
+		t.Fatalf("expected != nil; got: nil")
+	}
+	if len(percentileRanksAggRes.Values) == 0 {
+		t.Errorf("expected at least %d value; got %d\nValues are: %#v", 1, len(percentileRanksAggRes.Values), percentileRanksAggRes.Values)
+	}
+	if _, found := percentileRanksAggRes.Values["0.0"]; found {
+		t.Errorf("expected %v; got: %v", false, found)
+	}
+	if percentileRanksAggRes.Values["25.0"] != 21.180555555555557 {
+		t.Errorf("expected %v; got: %v", 21.180555555555557, percentileRanksAggRes.Values["25.0"])
+	}
+	if percentileRanksAggRes.Values["50.0"] != 29.86111111111111 {
+		t.Errorf("expected %v; got: %v", 29.86111111111111, percentileRanksAggRes.Values["50.0"])
+	}
+	if percentileRanksAggRes.Values["75.0"] != 38.54166666666667 {
+		t.Errorf("expected %v; got: %v", 38.54166666666667, percentileRanksAggRes.Values["75.0"])
+	}
+
+	// usersCardinality
+	cardAggRes, found := agg.Cardinality("usersCardinality")
+	if !found {
+		t.Errorf("expected %v; got: %v", true, found)
+	}
+	if cardAggRes == nil {
+		t.Fatalf("expected != nil; got: nil")
+	}
+	if cardAggRes.Value == nil {
+		t.Fatalf("expected != nil; got: %v", cardAggRes.Value)
+	}
+	if *cardAggRes.Value != 2 {
+		t.Errorf("expected %v; got: %v", 2, *cardAggRes.Value)
+	}
+
+	// retweetsFilter
+	filterAggRes, found := agg.Filter("retweetsFilter")
+	if !found {
+		t.Errorf("expected %v; got: %v", true, found)
+	}
+	if filterAggRes == nil {
+		t.Fatalf("expected != nil; got: nil")
+	}
+	if filterAggRes.DocCount != 2 {
+		t.Fatalf("expected %v; got: %v", 2, filterAggRes.DocCount)
+	}
+
+	// Retrieve sub-aggregation
+	avgRetweetsAggRes, found := filterAggRes.Avg("avgRetweetsSub")
+	if !found {
+		t.Error("expected sub-aggregation \"avgRetweetsSub\" to be found; got false")
+	}
+	if avgRetweetsAggRes == nil {
+		t.Fatal("expected sub-aggregation \"avgRetweetsSub\"; got nil")
+	}
+	if avgRetweetsAggRes.Value == nil {
+		t.Fatalf("expected != nil; got: %v", avgRetweetsAggRes.Value)
+	}
+	if *avgRetweetsAggRes.Value != 54.0 {
+		t.Errorf("expected %v; got: %v", 54.0, *avgRetweetsAggRes.Value)
+	}
+
+	// queryFilter
+	queryFilterAggRes, found := agg.Filter("queryFilter")
+	if !found {
+		t.Errorf("expected %v; got: %v", true, found)
+	}
+	if queryFilterAggRes == nil {
+		t.Fatalf("expected != nil; got: nil")
+	}
+	if
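+	// For reference, the relaxation in the percentiles checks above stems
+	// from the response shape changing between versions; roughly (values
+	// abridged, per the ES 1.4.x/1.5.0 note above):
+	//
+	//	ES 1.4.x: {"values": {"1.0": 0.24, ..., "99.0": 106.08}}
+	//	ES 1.5+:  {"values": {"1.0": 0.24, "1.0_as_string": "0.24", ...}}
+	//
+	// so only the presence of entries is asserted, plus a handful of exact
+	// keys that exist in both shapes.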
queryFilterAggRes.DocCount != 2 { + t.Fatalf("expected %v; got: %v", 2, queryFilterAggRes.DocCount) + } + + // significantTerms + stAggRes, found := agg.SignificantTerms("significantTerms") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if stAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if stAggRes.DocCount != 3 { + t.Errorf("expected %v; got: %v", 3, stAggRes.DocCount) + } + if len(stAggRes.Buckets) != 0 { + t.Errorf("expected %v; got: %v", 0, len(stAggRes.Buckets)) + } + + // sampler + samplerAggRes, found := agg.Sampler("sample") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if samplerAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if samplerAggRes.DocCount != 2 { + t.Errorf("expected %v; got: %v", 2, samplerAggRes.DocCount) + } + sub, found := samplerAggRes.Aggregations["tagged_with"] + if !found { + t.Fatalf("expected sub aggregation %q", "tagged_with") + } + if sub == nil { + t.Fatalf("expected sub aggregation %q; got: %v", "tagged_with", sub) + } + + // retweetsRange + rangeAggRes, found := agg.Range("retweetsRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if rangeAggRes == nil { + t.Fatal("expected != nil; got: nil") + } + if len(rangeAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(rangeAggRes.Buckets)) + } + if rangeAggRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[0].DocCount) + } + if rangeAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[1].DocCount) + } + if rangeAggRes.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[2].DocCount) + } + + // retweetsKeyedRange + keyedRangeAggRes, found := agg.KeyedRange("retweetsKeyedRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if keyedRangeAggRes == nil { + t.Fatal("expected != nil; got: nil") + } + if len(keyedRangeAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(keyedRangeAggRes.Buckets)) + } + _, found = keyedRangeAggRes.Buckets["no-such-key"] + if found { + t.Fatalf("expected bucket to not be found; got: %v", found) + } + bucket, found := keyedRangeAggRes.Buckets["*-10.0"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + bucket, found = keyedRangeAggRes.Buckets["10.0-100.0"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + bucket, found = keyedRangeAggRes.Buckets["100.0-*"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + + // dateRange + dateRangeRes, found := agg.DateRange("dateRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if dateRangeRes == nil { + t.Fatal("expected != nil; got: nil") + } + if dateRangeRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, dateRangeRes.Buckets[0].DocCount) + } + if dateRangeRes.Buckets[0].From != nil { + t.Fatal("expected From to be nil") + } + if dateRangeRes.Buckets[0].To == nil { + t.Fatal("expected To to be != nil") + } + if *dateRangeRes.Buckets[0].To != 1.325376e+12 { + t.Errorf("expected %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[0].To) + } + if dateRangeRes.Buckets[0].ToAsString != "2012-01-01T00:00:00.000Z" { + 
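+	// The two range flavours above differ only in how buckets are keyed.
+	// A sketch of the response shapes (abridged, field values assumed):
+	//
+	//	"retweetsRange":      {"buckets": [{"to": 10, "doc_count": 1}, ...]}
+	//	"retweetsKeyedRange": {"buckets": {"*-10.0": {"to": 10, "doc_count": 1}, ...}}
+	//
+	// which is why the keyed variant is read through agg.KeyedRange and a
+	// map lookup ("*-10.0") instead of positional indexing.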
t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[0].ToAsString) + } + if dateRangeRes.Buckets[1].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, dateRangeRes.Buckets[1].DocCount) + } + if dateRangeRes.Buckets[1].From == nil { + t.Fatal("expected From to be != nil") + } + if *dateRangeRes.Buckets[1].From != 1.325376e+12 { + t.Errorf("expected From = %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[1].From) + } + if dateRangeRes.Buckets[1].FromAsString != "2012-01-01T00:00:00.000Z" { + t.Errorf("expected FromAsString = %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].FromAsString) + } + if dateRangeRes.Buckets[1].To == nil { + t.Fatal("expected To to be != nil") + } + if *dateRangeRes.Buckets[1].To != 1.3569984e+12 { + t.Errorf("expected To = %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[1].To) + } + if dateRangeRes.Buckets[1].ToAsString != "2013-01-01T00:00:00.000Z" { + t.Errorf("expected ToAsString = %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].ToAsString) + } + if dateRangeRes.Buckets[2].DocCount != 0 { + t.Errorf("expected %d; got: %d", 0, dateRangeRes.Buckets[2].DocCount) + } + if dateRangeRes.Buckets[2].To != nil { + t.Fatal("expected To to be nil") + } + if dateRangeRes.Buckets[2].From == nil { + t.Fatal("expected From to be != nil") + } + if *dateRangeRes.Buckets[2].From != 1.3569984e+12 { + t.Errorf("expected %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[2].From) + } + if dateRangeRes.Buckets[2].FromAsString != "2013-01-01T00:00:00.000Z" { + t.Errorf("expected %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[2].FromAsString) + } + + // missingTags + missingRes, found := agg.Missing("missingTags") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if missingRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if missingRes.DocCount != 0 { + t.Errorf("expected searchResult.Aggregations[\"missingTags\"].DocCount = %v; got %v", 0, missingRes.DocCount) + } + + // retweetsHisto + histoRes, found := agg.Histogram("retweetsHisto") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if histoRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(histoRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(histoRes.Buckets)) + } + if histoRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, histoRes.Buckets[0].DocCount) + } + if histoRes.Buckets[0].Key != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, histoRes.Buckets[0].Key) + } + if histoRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount) + } + if histoRes.Buckets[1].Key != 100.0 { + t.Errorf("expected %v; got: %v", 100.0, histoRes.Buckets[1].Key) + } + + // dateHisto + dateHistoRes, found := agg.DateHistogram("dateHisto") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if dateHistoRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(dateHistoRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(dateHistoRes.Buckets)) + } + if dateHistoRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, dateHistoRes.Buckets[0].DocCount) + } + if dateHistoRes.Buckets[0].Key != 1.29384e+12 { + t.Errorf("expected %v; got: %v", 1.29384e+12, dateHistoRes.Buckets[0].Key) + } + if dateHistoRes.Buckets[0].KeyAsString == nil { + t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[0].KeyAsString) + } + if *dateHistoRes.Buckets[0].KeyAsString != "2011-01-01T00:00:00.000Z" { + 
t.Errorf("expected %q; got: %q", "2011-01-01T00:00:00.000Z", *dateHistoRes.Buckets[0].KeyAsString) + } + if dateHistoRes.Buckets[1].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, dateHistoRes.Buckets[1].DocCount) + } + if dateHistoRes.Buckets[1].Key != 1.325376e+12 { + t.Errorf("expected %v; got: %v", 1.325376e+12, dateHistoRes.Buckets[1].Key) + } + if dateHistoRes.Buckets[1].KeyAsString == nil { + t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[1].KeyAsString) + } + if *dateHistoRes.Buckets[1].KeyAsString != "2012-01-01T00:00:00.000Z" { + t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", *dateHistoRes.Buckets[1].KeyAsString) + } + + // topHits + topTags, found := agg.Terms("top-tags") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topTags == nil { + t.Fatalf("expected != nil; got: nil") + } + if esversion >= "1.4.0" { + if topTags.DocCountErrorUpperBound != 0 { + t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound) + } + if topTags.SumOfOtherDocCount != 1 { + t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount) + } + } + if len(topTags.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets)) + } + if topTags.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, topTags.Buckets[0].DocCount) + } + if topTags.Buckets[0].Key != "golang" { + t.Errorf("expected %v; got: %v", "golang", topTags.Buckets[0].Key) + } + topHits, found := topTags.Buckets[0].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatalf("expected != nil; got: nil") + } + if topHits.Hits.TotalHits != 2 { + t.Errorf("expected %d; got: %d", 2, topHits.Hits.TotalHits) + } + if topHits.Hits.Hits == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(topHits.Hits.Hits) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(topHits.Hits.Hits)) + } + hit := topHits.Hits.Hits[0] + if !found { + t.Fatalf("expected %v; got: %v", true, found) + } + if hit == nil { + t.Fatal("expected != nil; got: nil") + } + var tw tweet + if err := json.Unmarshal(*hit.Source, &tw); err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if tw.Message != "Welcome to Golang and Elasticsearch." 
{ + t.Errorf("expected %q; got: %q", "Welcome to Golang and Elasticsearch.", tw.Message) + } + if topTags.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, topTags.Buckets[1].DocCount) + } + if topTags.Buckets[1].Key != "cycling" { + t.Errorf("expected %v; got: %v", "cycling", topTags.Buckets[1].Key) + } + topHits, found = topTags.Buckets[1].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatal("expected != nil; got nil") + } + if topHits.Hits.TotalHits != 1 { + t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits) + } + if topTags.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, topTags.Buckets[2].DocCount) + } + if topTags.Buckets[2].Key != "elasticsearch" { + t.Errorf("expected %v; got: %v", "elasticsearch", topTags.Buckets[2].Key) + } + topHits, found = topTags.Buckets[2].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits.TotalHits != 1 { + t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits) + } + + // viewport via geo_bounds (1.3.0 has an error in that it doesn't output the aggregation name) + geoBoundsRes, found := agg.GeoBounds("viewport") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if geoBoundsRes == nil { + t.Fatalf("expected != nil; got: nil") + } + + if esversion >= "1.4" { + // Filters agg "countByUser" + countByUserAggRes, found := agg.Filters("countByUser") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if countByUserAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(countByUserAggRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets)) + } + if countByUserAggRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount) + } + if countByUserAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount) + } + } +} + +// TestAggsMarshal ensures that marshaling aggregations back into a string +// does not yield base64 encoded data. See https://github.com/olivere/elastic/issues/51 +// and https://groups.google.com/forum/#!topic/Golang-Nuts/38ShOlhxAYY for details. 
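+// The underlying mechanics: encoding/json base64-encodes a []byte field,
+// whereas a json.RawMessage round-trips its bytes verbatim. A minimal
+// sketch (hypothetical struct, not part of this package):
+//
+//	type resp struct {
+//		Agg json.RawMessage `json:"agg"` // as []byte: "eyJidWNrZXRzIjpbXX0="
+//	}
+//	b, _ := json.Marshal(resp{Agg: json.RawMessage(`{"buckets":[]}`)})
+//	// string(b) == `{"agg":{"buckets":[]}}`
+//
+// This is why aggregations must be kept as raw JSON rather than plain
+// byte slices when the search result is re-marshaled.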
+func TestAggsMarshal(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Image: "http://golang.org/doc/gopher/gophercolor.png", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + dhagg := NewDateHistogramAggregation().Field("created").Interval("year") + + // Run query + builder := client.Search().Index(testIndexName).Query(all) + builder = builder.Aggregation("dhagg", dhagg) + searchResult, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if searchResult.TotalHits() != 1 { + t.Errorf("expected Hits.TotalHits = %d; got: %d", 1, searchResult.TotalHits()) + } + if _, found := searchResult.Aggregations["dhagg"]; !found { + t.Fatalf("expected aggregation %q", "dhagg") + } + buf, err := json.Marshal(searchResult) + if err != nil { + t.Fatal(err) + } + s := string(buf) + if i := strings.Index(s, `{"dhagg":{"buckets":[{"key_as_string":"2012-01-01`); i < 0 { + t.Errorf("expected to serialize aggregation into string; got: %v", s) + } +} + +func TestAggsMetricsMin(t *testing.T) { + s := `{ + "min_price": { + "value": 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Min("min_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(10) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value) + } +} + +func TestAggsMetricsMax(t *testing.T) { + s := `{ + "max_price": { + "value": 35 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Max("max_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(35) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(35), *agg.Value) + } +} + +func TestAggsMetricsSum(t *testing.T) { + s := `{ + "intraday_return": { + "value": 2.18 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Sum("intraday_return") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(2.18) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(2.18), *agg.Value) + } +} + +func TestAggsMetricsAvg(t *testing.T) { + s := `{ + "avg_grade": { + 
"value": 75 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Avg("avg_grade") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(75) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(75), *agg.Value) + } +} + +func TestAggsMetricsValueCount(t *testing.T) { + s := `{ + "grades_count": { + "value": 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ValueCount("grades_count") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(10) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value) + } +} + +func TestAggsMetricsCardinality(t *testing.T) { + s := `{ + "author_count": { + "value": 12 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Cardinality("author_count") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(12) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(12), *agg.Value) + } +} + +func TestAggsMetricsStats(t *testing.T) { + s := `{ + "grades_stats": { + "count": 6, + "min": 60, + "max": 98, + "avg": 78.5, + "sum": 471 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Stats("grades_stats") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Count != int64(6) { + t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count) + } + if agg.Min == nil { + t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min) + } + if *agg.Min != float64(60) { + t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min) + } + if agg.Max == nil { + t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max) + } + if *agg.Max != float64(98) { + t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max) + } + if agg.Avg == nil { + t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg) + } + if *agg.Avg != float64(78.5) { + t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg) + } + if agg.Sum == nil { + t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum) + } + if *agg.Sum != float64(471) { + t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum) + } +} + +func TestAggsMetricsExtendedStats(t *testing.T) { + s := `{ + "grades_stats": { + "count": 6, + "min": 72, + "max": 117.6, + "avg": 94.2, + "sum": 565.2, + "sum_of_squares": 
54551.51999999999, + "variance": 218.2799999999976, + "std_deviation": 14.774302013969987 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ExtendedStats("grades_stats") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Count != int64(6) { + t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count) + } + if agg.Min == nil { + t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min) + } + if *agg.Min != float64(72) { + t.Fatalf("expected aggregation Min = %v; got: %v", float64(72), *agg.Min) + } + if agg.Max == nil { + t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max) + } + if *agg.Max != float64(117.6) { + t.Fatalf("expected aggregation Max = %v; got: %v", float64(117.6), *agg.Max) + } + if agg.Avg == nil { + t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg) + } + if *agg.Avg != float64(94.2) { + t.Fatalf("expected aggregation Avg = %v; got: %v", float64(94.2), *agg.Avg) + } + if agg.Sum == nil { + t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum) + } + if *agg.Sum != float64(565.2) { + t.Fatalf("expected aggregation Sum = %v; got: %v", float64(565.2), *agg.Sum) + } + if agg.SumOfSquares == nil { + t.Fatalf("expected aggregation sum_of_squares != nil; got: %v", agg.SumOfSquares) + } + if *agg.SumOfSquares != float64(54551.51999999999) { + t.Fatalf("expected aggregation sum_of_squares = %v; got: %v", float64(54551.51999999999), *agg.SumOfSquares) + } + if agg.Variance == nil { + t.Fatalf("expected aggregation Variance != nil; got: %v", agg.Variance) + } + if *agg.Variance != float64(218.2799999999976) { + t.Fatalf("expected aggregation Variance = %v; got: %v", float64(218.2799999999976), *agg.Variance) + } + if agg.StdDeviation == nil { + t.Fatalf("expected aggregation StdDeviation != nil; got: %v", agg.StdDeviation) + } + if *agg.StdDeviation != float64(14.774302013969987) { + t.Fatalf("expected aggregation StdDeviation = %v; got: %v", float64(14.774302013969987), *agg.StdDeviation) + } +} + +func TestAggsMetricsPercentiles(t *testing.T) { + s := `{ + "load_time_outlier": { + "values" : { + "1.0": 15, + "5.0": 20, + "25.0": 23, + "50.0": 25, + "75.0": 29, + "95.0": 60, + "99.0": 150 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Percentiles("load_time_outlier") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Values == nil { + t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values) + } + if len(agg.Values) != 7 { + t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values)) + } + if agg.Values["1.0"] != float64(15) { + t.Errorf("expected aggregation value for \"1.0\" = %v; got: %v", float64(15), agg.Values["1.0"]) + } + if agg.Values["5.0"] != float64(20) { + t.Errorf("expected aggregation value for \"5.0\" = %v; got: %v", float64(20), agg.Values["5.0"]) + } + if agg.Values["25.0"] != float64(23) { + t.Errorf("expected aggregation value for \"25.0\" = %v; got: %v", float64(23), agg.Values["25.0"]) + } + if agg.Values["50.0"] != float64(25) { + t.Errorf("expected aggregation value for \"50.0\" = %v; got: %v", 
float64(25), agg.Values["50.0"])
+	}
+	if agg.Values["75.0"] != float64(29) {
+		t.Errorf("expected aggregation value for \"75.0\" = %v; got: %v", float64(29), agg.Values["75.0"])
+	}
+	if agg.Values["95.0"] != float64(60) {
+		t.Errorf("expected aggregation value for \"95.0\" = %v; got: %v", float64(60), agg.Values["95.0"])
+	}
+	if agg.Values["99.0"] != float64(150) {
+		t.Errorf("expected aggregation value for \"99.0\" = %v; got: %v", float64(150), agg.Values["99.0"])
+	}
+}
+
+func TestAggsMetricsPercentileRanks(t *testing.T) {
+	s := `{
+	"load_time_outlier": {
+		"values" : {
+			"15": 92,
+			"30": 100
+		}
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.PercentileRanks("load_time_outlier")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Values == nil {
+		t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
+	}
+	if len(agg.Values) != 2 {
+		t.Fatalf("expected %d aggregation Values; got: %d", 2, len(agg.Values))
+	}
+	if agg.Values["15"] != float64(92) {
+		t.Errorf("expected aggregation value for \"15\" = %v; got: %v", float64(92), agg.Values["15"])
+	}
+	if agg.Values["30"] != float64(100) {
+		t.Errorf("expected aggregation value for \"30\" = %v; got: %v", float64(100), agg.Values["30"])
+	}
+}
+
+func TestAggsMetricsTopHits(t *testing.T) {
+	s := `{
+	"top-tags": {
+		"buckets": [
+			{
+				"key": "windows-7",
+				"doc_count": 25365,
+				"top_tags_hits": {
+					"hits": {
+						"total": 25365,
+						"max_score": 1,
+						"hits": [
+							{
+								"_index": "stack",
+								"_type": "question",
+								"_id": "602679",
+								"_score": 1,
+								"_source": {
+									"title": "Windows port opening"
+								},
+								"sort": [
+									1370143231177
+								]
+							}
+						]
+					}
+				}
+			},
+			{
+				"key": "linux",
+				"doc_count": 18342,
+				"top_tags_hits": {
+					"hits": {
+						"total": 18342,
+						"max_score": 1,
+						"hits": [
+							{
+								"_index": "stack",
+								"_type": "question",
+								"_id": "602672",
+								"_score": 1,
+								"_source": {
+									"title": "Ubuntu RFID Screensaver lock-unlock"
+								},
+								"sort": [
+									1370143379747
+								]
+							}
+						]
+					}
+				}
+			},
+			{
+				"key": "windows",
+				"doc_count": 18119,
+				"top_tags_hits": {
+					"hits": {
+						"total": 18119,
+						"max_score": 1,
+						"hits": [
+							{
+								"_index": "stack",
+								"_type": "question",
+								"_id": "602678",
+								"_score": 1,
+								"_source": {
+									"title": "If I change my computers date / time, what could be affected?"
+ }, + "sort": [ + 1370142868283 + ] + } + ] + } + } + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("top-tags") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "windows-7" { + t.Errorf("expected bucket key = %q; got: %q", "windows-7", agg.Buckets[0].Key) + } + if agg.Buckets[1].Key != "linux" { + t.Errorf("expected bucket key = %q; got: %q", "linux", agg.Buckets[1].Key) + } + if agg.Buckets[2].Key != "windows" { + t.Errorf("expected bucket key = %q; got: %q", "windows", agg.Buckets[2].Key) + } + + // Sub-aggregation of top-hits + subAgg, found := agg.Buckets[0].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 25365 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 25365, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } + + subAgg, found = agg.Buckets[1].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 18342 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18342, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } + + subAgg, found = agg.Buckets[2].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 18119 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18119, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } +} + +func TestAggsBucketGlobal(t *testing.T) { + s := `{ + "all_products" : { + "doc_count" : 100, + "avg_price" : { + "value" : 56.3 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + 
t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Global("all_products") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 100 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("avg_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(56.3) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value) + } +} + +func TestAggsBucketFilter(t *testing.T) { + s := `{ + "in_stock_products" : { + "doc_count" : 100, + "avg_price" : { "value" : 56.3 } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filter("in_stock_products") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 100 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("avg_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(56.3) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value) + } +} + +func TestAggsBucketFiltersWithBuckets(t *testing.T) { + s := `{ + "messages" : { + "buckets" : [ + { + "doc_count" : 34, + "monthly" : { + "buckets" : [] + } + }, + { + "doc_count" : 439, + "monthly" : { + "buckets" : [] + } + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filters("messages") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Fatalf("expected %d buckets; got: %d", 2, len(agg.Buckets)) + } + + if agg.Buckets[0].DocCount != 34 { + t.Fatalf("expected DocCount = %d; got: %d", 34, agg.Buckets[0].DocCount) + } + subAgg, found := agg.Buckets[0].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } + + if agg.Buckets[1].DocCount != 439 { + t.Fatalf("expected DocCount = %d; got: %d", 439, agg.Buckets[1].DocCount) + } + subAgg, found = agg.Buckets[1].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } +} + +func TestAggsBucketFiltersWithNamedBuckets(t *testing.T) { + s := `{ + "messages" : { + "buckets" : { + "errors" : 
{ + "doc_count" : 34, + "monthly" : { + "buckets" : [] + } + }, + "warnings" : { + "doc_count" : 439, + "monthly" : { + "buckets" : [] + } + } + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filters("messages") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.NamedBuckets == nil { + t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.NamedBuckets) + } + if len(agg.NamedBuckets) != 2 { + t.Fatalf("expected %d buckets; got: %d", 2, len(agg.NamedBuckets)) + } + + if agg.NamedBuckets["errors"].DocCount != 34 { + t.Fatalf("expected DocCount = %d; got: %d", 34, agg.NamedBuckets["errors"].DocCount) + } + subAgg, found := agg.NamedBuckets["errors"].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } + + if agg.NamedBuckets["warnings"].DocCount != 439 { + t.Fatalf("expected DocCount = %d; got: %d", 439, agg.NamedBuckets["warnings"].DocCount) + } + subAgg, found = agg.NamedBuckets["warnings"].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } +} + +func TestAggsBucketMissing(t *testing.T) { + s := `{ + "products_without_a_price" : { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Missing("products_without_a_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketNested(t *testing.T) { + s := `{ + "resellers": { + "min_price": { + "value" : 350 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Nested("resellers") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 0 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 0, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("min_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(350) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(350), *subAgg.Value) + } +} + +func TestAggsBucketReverseNested(t *testing.T) { + s := `{ + "comment_to_issue": { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ReverseNested("comment_to_issue") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if 
agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketChildren(t *testing.T) { + s := `{ + "to-answers": { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Children("to-answers") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketTerms(t *testing.T) { + s := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : "olivere", + "doc_count" : 2 + }, { + "key" : "sandrae", + "doc_count" : 1 + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("users") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "olivere" { + t.Errorf("expected key %q; got: %q", "olivere", agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != "sandrae" { + t.Errorf("expected key %q; got: %q", "sandrae", agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketTermsWithNumericKeys(t *testing.T) { + s := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : 17, + "doc_count" : 2 + }, { + "key" : 21, + "doc_count" : 1 + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("users") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != float64(17) { + t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key) + } + if got, err := agg.Buckets[0].KeyNumber.Int64(); err != nil { + t.Errorf("expected to convert key to int64; got: %v", err) + } else if got != 17 { + t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != float64(21) { + t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key) + } + if got, err := agg.Buckets[1].KeyNumber.Int64(); err != nil { + t.Errorf("expected to convert key to int64; got: %v", err) + } else if got != 21 { + t.Errorf("expected key %v; got: %v", 21, 
agg.Buckets[1].Key)
+	}
+	if agg.Buckets[1].DocCount != 1 {
+		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
+	}
+}
+
+func TestAggsBucketTermsWithBoolKeys(t *testing.T) {
+	s := `{
+	"users" : {
+		"doc_count_error_upper_bound" : 1,
+		"sum_other_doc_count" : 2,
+		"buckets" : [ {
+			"key" : true,
+			"doc_count" : 2
+		}, {
+			"key" : false,
+			"doc_count" : 1
+		} ]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.Terms("users")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Buckets == nil {
+		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+	}
+	if len(agg.Buckets) != 2 {
+		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+	}
+	if agg.Buckets[0].Key != true {
+		t.Errorf("expected key %v; got: %v", true, agg.Buckets[0].Key)
+	}
+	if agg.Buckets[0].DocCount != 2 {
+		t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
+	}
+	if agg.Buckets[1].Key != false {
+		t.Errorf("expected key %v; got: %v", false, agg.Buckets[1].Key)
+	}
+	if agg.Buckets[1].DocCount != 1 {
+		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
+	}
+}
+
+func TestAggsBucketSignificantTerms(t *testing.T) {
+	s := `{
+	"significantCrimeTypes" : {
+		"doc_count": 47347,
+		"buckets" : [
+			{
+				"key": "Bicycle theft",
+				"doc_count": 3640,
+				"score": 0.371235374214817,
+				"bg_count": 66799
+			}
+		]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.SignificantTerms("significantCrimeTypes")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.DocCount != 47347 {
+		t.Fatalf("expected aggregation DocCount = %d; got: %d", 47347, agg.DocCount)
+	}
+	if agg.Buckets == nil {
+		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+	}
+	if len(agg.Buckets) != 1 {
+		t.Errorf("expected %d bucket entries; got: %d", 1, len(agg.Buckets))
+	}
+	if agg.Buckets[0].Key != "Bicycle theft" {
+		t.Errorf("expected key = %q; got: %q", "Bicycle theft", agg.Buckets[0].Key)
+	}
+	if agg.Buckets[0].DocCount != 3640 {
+		t.Errorf("expected doc count = %d; got: %d", 3640, agg.Buckets[0].DocCount)
+	}
+	if agg.Buckets[0].Score != float64(0.371235374214817) {
+		t.Errorf("expected score = %v; got: %v", float64(0.371235374214817), agg.Buckets[0].Score)
+	}
+	if agg.Buckets[0].BgCount != 66799 {
+		t.Errorf("expected BgCount = %d; got: %d", 66799, agg.Buckets[0].BgCount)
+	}
+}
+
+func TestAggsBucketSampler(t *testing.T) {
+	s := `{
+	"sample" : {
+		"doc_count": 1000,
+		"keywords": {
+			"doc_count": 1000,
+			"buckets" : [
+				{
+					"key": "bend",
+					"doc_count": 58,
+					"score": 37.982536582524276,
+					"bg_count": 103
+				}
+			]
+		}
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.Sampler("sample")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.DocCount != 1000 {
+		t.Fatalf("expected aggregation DocCount = %d; got: %d", 1000, agg.DocCount)
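+	// For significant_terms (tested above), each bucket reports both its
+	// frequency in the filtered foreground set (doc_count) and in the
+	// background index (bg_count); score is the significance measure
+	// derived from that contrast. The sampler response below nests a
+	// significant_terms block under "keywords", which is why its buckets
+	// are reached through agg.Aggregations["keywords"].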
+	}
+	sub, found := agg.Aggregations["keywords"]
+	if !found {
+		t.Fatalf("expected sub aggregation %q", "keywords")
+	}
+	if sub == nil {
+		t.Fatalf("expected sub aggregation %q; got: %v", "keywords", sub)
+	}
+}
+
+func TestAggsBucketRange(t *testing.T) {
+	s := `{
+	"price_ranges" : {
+		"buckets": [
+			{
+				"to": 50,
+				"doc_count": 2
+			},
+			{
+				"from": 50,
+				"to": 100,
+				"doc_count": 4
+			},
+			{
+				"from": 100,
+				"doc_count": 4
+			}
+		]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.Range("price_ranges")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Buckets == nil {
+		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+	}
+	if len(agg.Buckets) != 3 {
+		t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+	}
+	if agg.Buckets[0].From != nil {
+		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+	}
+	if agg.Buckets[0].To == nil {
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+	}
+	if *agg.Buckets[0].To != float64(50) {
+		t.Errorf("expected To = %v; got: %v", float64(50), *agg.Buckets[0].To)
+	}
+	if agg.Buckets[0].DocCount != 2 {
+		t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[0].DocCount)
+	}
+	if agg.Buckets[1].From == nil {
+		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+	}
+	if *agg.Buckets[1].From != float64(50) {
+		t.Errorf("expected From = %v; got: %v", float64(50), *agg.Buckets[1].From)
+	}
+	if agg.Buckets[1].To == nil {
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
+	}
+	if *agg.Buckets[1].To != float64(100) {
+		t.Errorf("expected To = %v; got: %v", float64(100), *agg.Buckets[1].To)
+	}
+	if agg.Buckets[1].DocCount != 4 {
+		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[1].DocCount)
+	}
+	if agg.Buckets[2].From == nil {
+		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
+	}
+	if *agg.Buckets[2].From != float64(100) {
+		t.Errorf("expected From = %v; got: %v", float64(100), *agg.Buckets[2].From)
+	}
+	if agg.Buckets[2].To != nil {
+		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
+	}
+	if agg.Buckets[2].DocCount != 4 {
+		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[2].DocCount)
+	}
+}
+
+func TestAggsBucketDateRange(t *testing.T) {
+	s := `{
+	"range": {
+		"buckets": [
+			{
+				"to": 1.3437792E+12,
+				"to_as_string": "08-2012",
+				"doc_count": 7
+			},
+			{
+				"from": 1.3437792E+12,
+				"from_as_string": "08-2012",
+				"doc_count": 2
+			}
+		]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.DateRange("range")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Buckets == nil {
+		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+	}
+	if len(agg.Buckets) != 2 {
+		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+	}
+	if agg.Buckets[0].From != nil {
+		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+	}
+	if agg.Buckets[0].To == nil {
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+	}
+	if *agg.Buckets[0].To != float64(1.3437792E+12) {
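+	// In range-style responses the first bucket omits "from" and the last
+	// omits "to", so From and To decode as nil pointers for the unbounded
+	// ends; for date_range the bounds are epoch milliseconds
+	// (1.3437792e+12 ms = 2012-08-01T00:00:00Z, formatted here as "08-2012").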
t.Errorf("expected To = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[0].To) + } + if agg.Buckets[0].ToAsString != "08-2012" { + t.Errorf("expected ToAsString = %q; got: %q", "08-2012", agg.Buckets[0].ToAsString) + } + if agg.Buckets[0].DocCount != 7 { + t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(1.3437792E+12) { + t.Errorf("expected From = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[1].From) + } + if agg.Buckets[1].FromAsString != "08-2012" { + t.Errorf("expected FromAsString = %q; got: %q", "08-2012", agg.Buckets[1].FromAsString) + } + if agg.Buckets[1].To != nil { + t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To) + } + if agg.Buckets[1].DocCount != 2 { + t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketIPv4Range(t *testing.T) { + s := `{ + "ip_ranges": { + "buckets" : [ + { + "to": 167772165, + "to_as_string": "10.0.0.5", + "doc_count": 4 + }, + { + "from": 167772165, + "from_as_string": "10.0.0.5", + "doc_count": 6 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.IPv4Range("ip_ranges") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].From != nil { + t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) + } + if agg.Buckets[0].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) + } + if *agg.Buckets[0].To != float64(167772165) { + t.Errorf("expected To = %v; got: %v", float64(167772165), *agg.Buckets[0].To) + } + if agg.Buckets[0].ToAsString != "10.0.0.5" { + t.Errorf("expected ToAsString = %q; got: %q", "10.0.0.5", agg.Buckets[0].ToAsString) + } + if agg.Buckets[0].DocCount != 4 { + t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(167772165) { + t.Errorf("expected From = %v; got: %v", float64(167772165), *agg.Buckets[1].From) + } + if agg.Buckets[1].FromAsString != "10.0.0.5" { + t.Errorf("expected FromAsString = %q; got: %q", "10.0.0.5", agg.Buckets[1].FromAsString) + } + if agg.Buckets[1].To != nil { + t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To) + } + if agg.Buckets[1].DocCount != 6 { + t.Errorf("expected DocCount = %d; got: %d", 6, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketHistogram(t *testing.T) { + s := `{ + "prices" : { + "buckets": [ + { + "key": 0, + "doc_count": 2 + }, + { + "key": 50, + "doc_count": 4 + }, + { + "key": 150, + "doc_count": 3 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Histogram("prices") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { 
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d buckets; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].Key != 0 { + t.Errorf("expected key = %v; got: %v", 0, agg.Buckets[0].Key) + } + if agg.Buckets[0].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[0].KeyAsString) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count = %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != 50 { + t.Errorf("expected key = %v; got: %v", 50, agg.Buckets[1].Key) + } + if agg.Buckets[1].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[1].KeyAsString) + } + if agg.Buckets[1].DocCount != 4 { + t.Errorf("expected doc count = %d; got: %d", 4, agg.Buckets[1].DocCount) + } + if agg.Buckets[2].Key != 150 { + t.Errorf("expected key = %v; got: %v", 150, agg.Buckets[2].Key) + } + if agg.Buckets[2].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[2].KeyAsString) + } + if agg.Buckets[2].DocCount != 3 { + t.Errorf("expected doc count = %d; got: %d", 3, agg.Buckets[2].DocCount) + } +} + +func TestAggsBucketDateHistogram(t *testing.T) { + s := `{ + "articles_over_time": { + "buckets": [ + { + "key_as_string": "2013-02-02", + "key": 1328140800000, + "doc_count": 1 + }, + { + "key_as_string": "2013-03-02", + "key": 1330646400000, + "doc_count": 2 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.DateHistogram("articles_over_time") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != 1328140800000 { + t.Errorf("expected key %v; got: %v", 1328140800000, agg.Buckets[0].Key) + } + if agg.Buckets[0].KeyAsString == nil { + t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[0].KeyAsString) + } + if *agg.Buckets[0].KeyAsString != "2013-02-02" { + t.Errorf("expected key_as_string %q; got: %q", "2013-02-02", *agg.Buckets[0].KeyAsString) + } + if agg.Buckets[0].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != 1330646400000 { + t.Errorf("expected key %v; got: %v", 1330646400000, agg.Buckets[1].Key) + } + if agg.Buckets[1].KeyAsString == nil { + t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[1].KeyAsString) + } + if *agg.Buckets[1].KeyAsString != "2013-03-02" { + t.Errorf("expected key_as_string %q; got: %q", "2013-03-02", *agg.Buckets[1].KeyAsString) + } + if agg.Buckets[1].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[1].DocCount) + } +} + +func TestAggsMetricsGeoBounds(t *testing.T) { + s := `{ + "viewport": { + "bounds": { + "top_left": { + "lat": 80.45, + "lon": -160.22 + }, + "bottom_right": { + "lat": 40.65, + "lon": 42.57 + } + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.GeoBounds("viewport") + if !found { + t.Fatalf("expected aggregation to be found; got: 
%v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Bounds.TopLeft.Latitude != float64(80.45) {
+		t.Fatalf("expected Bounds.TopLeft.Latitude != %v; got: %v", float64(80.45), agg.Bounds.TopLeft.Latitude)
+	}
+	if agg.Bounds.TopLeft.Longitude != float64(-160.22) {
+		t.Fatalf("expected Bounds.TopLeft.Longitude != %v; got: %v", float64(-160.22), agg.Bounds.TopLeft.Longitude)
+	}
+	if agg.Bounds.BottomRight.Latitude != float64(40.65) {
+		t.Fatalf("expected Bounds.BottomRight.Latitude != %v; got: %v", float64(40.65), agg.Bounds.BottomRight.Latitude)
+	}
+	if agg.Bounds.BottomRight.Longitude != float64(42.57) {
+		t.Fatalf("expected Bounds.BottomRight.Longitude != %v; got: %v", float64(42.57), agg.Bounds.BottomRight.Longitude)
+	}
+}
+
+func TestAggsBucketGeoHash(t *testing.T) {
+	s := `{
+	"myLarge-GrainGeoHashGrid": {
+		"buckets": [
+			{
+				"key": "svz",
+				"doc_count": 10964
+			},
+			{
+				"key": "sv8",
+				"doc_count": 3198
+			}
+		]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.GeoHash("myLarge-GrainGeoHashGrid")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Buckets == nil {
+		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+	}
+	if len(agg.Buckets) != 2 {
+		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+	}
+	if agg.Buckets[0].Key != "svz" {
+		t.Errorf("expected key %q; got: %q", "svz", agg.Buckets[0].Key)
+	}
+	if agg.Buckets[0].DocCount != 10964 {
+		t.Errorf("expected doc count %d; got: %d", 10964, agg.Buckets[0].DocCount)
+	}
+	if agg.Buckets[1].Key != "sv8" {
+		t.Errorf("expected key %q; got: %q", "sv8", agg.Buckets[1].Key)
+	}
+	if agg.Buckets[1].DocCount != 3198 {
+		t.Errorf("expected doc count %d; got: %d", 3198, agg.Buckets[1].DocCount)
+	}
+}
+
+func TestAggsBucketGeoDistance(t *testing.T) {
+	s := `{
+	"rings" : {
+		"buckets": [
+			{
+				"unit": "km",
+				"to": 100.0,
+				"doc_count": 3
+			},
+			{
+				"unit": "km",
+				"from": 100.0,
+				"to": 300.0,
+				"doc_count": 1
+			},
+			{
+				"unit": "km",
+				"from": 300.0,
+				"doc_count": 7
+			}
+		]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.GeoDistance("rings")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Buckets == nil {
+		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+	}
+	if len(agg.Buckets) != 3 {
+		t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+	}
+	if agg.Buckets[0].From != nil {
+		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+	}
+	if agg.Buckets[0].To == nil {
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+	}
+	if *agg.Buckets[0].To != float64(100.0) {
+		t.Errorf("expected To = %v; got: %v", float64(100.0), *agg.Buckets[0].To)
+	}
+	if agg.Buckets[0].DocCount != 3 {
+		t.Errorf("expected DocCount = %d; got: %d", 3, agg.Buckets[0].DocCount)
+	}
+
+	if agg.Buckets[1].From == nil {
+		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+	}
+	if *agg.Buckets[1].From != float64(100.0) {
+		t.Errorf("expected From = %v; got: %v", float64(100.0), *agg.Buckets[1].From)
+	}
+	if
agg.Buckets[1].To == nil {
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
+	}
+	if *agg.Buckets[1].To != float64(300.0) {
+		t.Errorf("expected To = %v; got: %v", float64(300.0), *agg.Buckets[1].To)
+	}
+	if agg.Buckets[1].DocCount != 1 {
+		t.Errorf("expected DocCount = %d; got: %d", 1, agg.Buckets[1].DocCount)
+	}
+
+	if agg.Buckets[2].From == nil {
+		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
+	}
+	if *agg.Buckets[2].From != float64(300.0) {
+		t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[2].From)
+	}
+	if agg.Buckets[2].To != nil {
+		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
+	}
+	if agg.Buckets[2].DocCount != 7 {
+		t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[2].DocCount)
+	}
+}
+
+func TestAggsSubAggregates(t *testing.T) {
+	rs := `{
+	"users" : {
+		"doc_count_error_upper_bound" : 1,
+		"sum_other_doc_count" : 2,
+		"buckets" : [ {
+			"key" : "olivere",
+			"doc_count" : 2,
+			"ts" : {
+				"buckets" : [ {
+					"key_as_string" : "2012-01-01T00:00:00.000Z",
+					"key" : 1325376000000,
+					"doc_count" : 2
+				} ]
+			}
+		}, {
+			"key" : "sandrae",
+			"doc_count" : 1,
+			"ts" : {
+				"buckets" : [ {
+					"key_as_string" : "2011-01-01T00:00:00.000Z",
+					"key" : 1293840000000,
+					"doc_count" : 1
+				} ]
+			}
+		} ]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(rs), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	// Access top-level aggregation
+	users, found := aggs.Terms("users")
+	if !found {
+		t.Fatalf("expected users aggregation to be found; got: %v", found)
+	}
+	if users == nil {
+		t.Fatalf("expected users aggregation; got: %v", users)
+	}
+	if users.Buckets == nil {
+		t.Fatalf("expected users buckets; got: %v", users.Buckets)
+	}
+	if len(users.Buckets) != 2 {
+		t.Errorf("expected %d bucket entries; got: %d", 2, len(users.Buckets))
+	}
+	if users.Buckets[0].Key != "olivere" {
+		t.Errorf("expected key %q; got: %q", "olivere", users.Buckets[0].Key)
+	}
+	if users.Buckets[0].DocCount != 2 {
+		t.Errorf("expected doc count %d; got: %d", 2, users.Buckets[0].DocCount)
+	}
+	if users.Buckets[1].Key != "sandrae" {
+		t.Errorf("expected key %q; got: %q", "sandrae", users.Buckets[1].Key)
+	}
+	if users.Buckets[1].DocCount != 1 {
+		t.Errorf("expected doc count %d; got: %d", 1, users.Buckets[1].DocCount)
+	}
+
+	// Access sub-aggregation
+	ts, found := users.Buckets[0].DateHistogram("ts")
+	if !found {
+		t.Fatalf("expected ts aggregation to be found; got: %v", found)
+	}
+	if ts == nil {
+		t.Fatalf("expected ts aggregation; got: %v", ts)
+	}
+	if ts.Buckets == nil {
+		t.Fatalf("expected ts buckets; got: %v", ts.Buckets)
+	}
+	if len(ts.Buckets) != 1 {
+		t.Errorf("expected %d bucket entries; got: %d", 1, len(ts.Buckets))
+	}
+	if ts.Buckets[0].Key != 1325376000000 {
+		t.Errorf("expected key %v; got: %v", 1325376000000, ts.Buckets[0].Key)
+	}
+	if ts.Buckets[0].KeyAsString == nil {
+		t.Fatalf("expected key_as_string != %v; got: %v", nil, ts.Buckets[0].KeyAsString)
+	}
+	if *ts.Buckets[0].KeyAsString != "2012-01-01T00:00:00.000Z" {
+		t.Errorf("expected key_as_string %q; got: %q", "2012-01-01T00:00:00.000Z", *ts.Buckets[0].KeyAsString)
+	}
+}
+
+func TestAggsPipelineAvgBucket(t *testing.T) {
+	s := `{
+	"avg_monthly_sales" : {
+		"value" : 328.33333333333333
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.AvgBucket("avg_monthly_sales")
+	if
!found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(328.33333333333333) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(328.33333333333333), *agg.Value) + } +} + +func TestAggsPipelineSumBucket(t *testing.T) { + s := `{ + "sum_monthly_sales" : { + "value" : 985 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.SumBucket("sum_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(985) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(985), *agg.Value) + } +} + +func TestAggsPipelineMaxBucket(t *testing.T) { + s := `{ + "max_monthly_sales" : { + "keys": ["2015/01/01 00:00:00"], + "value" : 550 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MaxBucket("max_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if len(agg.Keys) != 1 { + t.Fatalf("expected 1 key; got: %d", len(agg.Keys)) + } + if got, want := agg.Keys[0], "2015/01/01 00:00:00"; got != want { + t.Fatalf("expected key %q; got: %v (%T)", want, got, got) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(550) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value) + } +} + +func TestAggsPipelineMinBucket(t *testing.T) { + s := `{ + "min_monthly_sales" : { + "keys": ["2015/02/01 00:00:00"], + "value" : 60 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MinBucket("min_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if len(agg.Keys) != 1 { + t.Fatalf("expected 1 key; got: %d", len(agg.Keys)) + } + if got, want := agg.Keys[0], "2015/02/01 00:00:00"; got != want { + t.Fatalf("expected key %q; got: %v (%T)", want, got, got) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(60) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(60), *agg.Value) + } +} + +func TestAggsPipelineMovAvg(t *testing.T) { + s := `{ + "the_movavg" : { + "value" : 12.0 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MovAvg("the_movavg") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(12.0) { + 
t.Fatalf("expected aggregation value = %v; got: %v", float64(12.0), *agg.Value)
+	}
+}
+
+func TestAggsPipelineDerivative(t *testing.T) {
+	s := `{
+	"sales_deriv" : {
+		"value" : 315
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.Derivative("sales_deriv")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Value == nil {
+		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+	}
+	if *agg.Value != float64(315) {
+		t.Fatalf("expected aggregation value = %v; got: %v", float64(315), *agg.Value)
+	}
+}
+
+func TestAggsPipelineCumulativeSum(t *testing.T) {
+	s := `{
+	"cumulative_sales" : {
+		"value" : 550
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.CumulativeSum("cumulative_sales")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Value == nil {
+		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+	}
+	if *agg.Value != float64(550) {
+		t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value)
+	}
+}
+
+func TestAggsPipelineBucketScript(t *testing.T) {
+	s := `{
+	"t-shirt-percentage" : {
+		"value" : 20
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.BucketScript("t-shirt-percentage")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Value == nil {
+		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+	}
+	if *agg.Value != float64(20) {
+		t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value)
+	}
+}
+
+func TestAggsPipelineSerialDiff(t *testing.T) {
+	s := `{
+	"the_diff" : {
+		"value" : -722.0
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.SerialDiff("the_diff")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Value == nil {
+		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+	}
+	if *agg.Value != float64(-722.0) {
+		t.Fatalf("expected aggregation value = %v; got: %v", float64(-722.0), *agg.Value)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool.go
new file mode 100644
index 000000000..c2cc8697b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool.go
@@ -0,0 +1,212 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "fmt"
+
+// A bool query matches documents matching boolean
+// combinations of other queries.
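+//
+// A minimal usage sketch (the field names and values are illustrative,
+// mirroring this package's tests, not a prescribed schema):
+//
+//	q := NewBoolQuery().
+//		Must(NewTermQuery("user", "kimchy")).
+//		Filter(NewTermQuery("account", "1")).
+//		MustNot(NewRangeQuery("age").From(10).To(20))
+//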
+// For more details, see: +// http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html +type BoolQuery struct { + Query + mustClauses []Query + mustNotClauses []Query + filterClauses []Query + shouldClauses []Query + boost *float64 + disableCoord *bool + minimumShouldMatch string + adjustPureNegative *bool + queryName string +} + +// Creates a new bool query. +func NewBoolQuery() *BoolQuery { + return &BoolQuery{ + mustClauses: make([]Query, 0), + mustNotClauses: make([]Query, 0), + filterClauses: make([]Query, 0), + shouldClauses: make([]Query, 0), + } +} + +func (q *BoolQuery) Must(queries ...Query) *BoolQuery { + q.mustClauses = append(q.mustClauses, queries...) + return q +} + +func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery { + q.mustNotClauses = append(q.mustNotClauses, queries...) + return q +} + +func (q *BoolQuery) Filter(filters ...Query) *BoolQuery { + q.filterClauses = append(q.filterClauses, filters...) + return q +} + +func (q *BoolQuery) Should(queries ...Query) *BoolQuery { + q.shouldClauses = append(q.shouldClauses, queries...) + return q +} + +func (q *BoolQuery) Boost(boost float64) *BoolQuery { + q.boost = &boost + return q +} + +func (q *BoolQuery) DisableCoord(disableCoord bool) *BoolQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery { + q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch) + return q +} + +func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery { + q.adjustPureNegative = &adjustPureNegative + return q +} + +func (q *BoolQuery) QueryName(queryName string) *BoolQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the bool query. +func (q *BoolQuery) Source() (interface{}, error) { + // { + // "bool" : { + // "must" : { + // "term" : { "user" : "kimchy" } + // }, + // "must_not" : { + // "range" : { + // "age" : { "from" : 10, "to" : 20 } + // } + // }, + // "filter" : [ + // ... 
+	//       ],
+	//       "should" : [
+	//         {
+	//           "term" : { "tag" : "wow" }
+	//         },
+	//         {
+	//           "term" : { "tag" : "elasticsearch" }
+	//         }
+	//       ],
+	//       "minimum_should_match" : 1,
+	//       "boost" : 1.0
+	//   }
+	// }
+
+	query := make(map[string]interface{})
+
+	boolClause := make(map[string]interface{})
+	query["bool"] = boolClause
+
+	// must
+	if len(q.mustClauses) == 1 {
+		src, err := q.mustClauses[0].Source()
+		if err != nil {
+			return nil, err
+		}
+		boolClause["must"] = src
+	} else if len(q.mustClauses) > 1 {
+		clauses := make([]interface{}, 0)
+		for _, subQuery := range q.mustClauses {
+			src, err := subQuery.Source()
+			if err != nil {
+				return nil, err
+			}
+			clauses = append(clauses, src)
+		}
+		boolClause["must"] = clauses
+	}
+
+	// must_not
+	if len(q.mustNotClauses) == 1 {
+		src, err := q.mustNotClauses[0].Source()
+		if err != nil {
+			return nil, err
+		}
+		boolClause["must_not"] = src
+	} else if len(q.mustNotClauses) > 1 {
+		clauses := make([]interface{}, 0)
+		for _, subQuery := range q.mustNotClauses {
+			src, err := subQuery.Source()
+			if err != nil {
+				return nil, err
+			}
+			clauses = append(clauses, src)
+		}
+		boolClause["must_not"] = clauses
+	}
+
+	// filter
+	if len(q.filterClauses) == 1 {
+		src, err := q.filterClauses[0].Source()
+		if err != nil {
+			return nil, err
+		}
+		boolClause["filter"] = src
+	} else if len(q.filterClauses) > 1 {
+		clauses := make([]interface{}, 0)
+		for _, subQuery := range q.filterClauses {
+			src, err := subQuery.Source()
+			if err != nil {
+				return nil, err
+			}
+			clauses = append(clauses, src)
+		}
+		boolClause["filter"] = clauses
+	}
+
+	// should
+	if len(q.shouldClauses) == 1 {
+		src, err := q.shouldClauses[0].Source()
+		if err != nil {
+			return nil, err
+		}
+		boolClause["should"] = src
+	} else if len(q.shouldClauses) > 1 {
+		clauses := make([]interface{}, 0)
+		for _, subQuery := range q.shouldClauses {
+			src, err := subQuery.Source()
+			if err != nil {
+				return nil, err
+			}
+			clauses = append(clauses, src)
+		}
+		boolClause["should"] = clauses
+	}
+
+	if q.boost != nil {
+		boolClause["boost"] = *q.boost
+	}
+	if q.disableCoord != nil {
+		boolClause["disable_coord"] = *q.disableCoord
+	}
+	if q.minimumShouldMatch != "" {
+		boolClause["minimum_should_match"] = q.minimumShouldMatch
+	}
+	if q.adjustPureNegative != nil {
+		boolClause["adjust_pure_negative"] = *q.adjustPureNegative
+	}
+	if q.queryName != "" {
+		boolClause["_name"] = q.queryName
+	}
+
+	return query, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool_test.go
new file mode 100644
index 000000000..327d3f635
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool_test.go
@@ -0,0 +1,34 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBoolQuery(t *testing.T) { + q := NewBoolQuery() + q = q.Must(NewTermQuery("tag", "wow")) + q = q.MustNot(NewRangeQuery("age").From(10).To(20)) + q = q.Filter(NewTermQuery("account", "1")) + q = q.Should(NewTermQuery("tag", "sometag"), NewTermQuery("tag", "sometagtag")) + q = q.Boost(10) + q = q.DisableCoord(true) + q = q.QueryName("Test") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bool":{"_name":"Test","boost":10,"disable_coord":true,"filter":{"term":{"account":"1"}},"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting.go new file mode 100644 index 000000000..7f7a53b8b --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// A boosting query can be used to effectively +// demote results that match a given query. +// For more details, see: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-boosting-query.html +type BoostingQuery struct { + Query + positiveClause Query + negativeClause Query + negativeBoost *float64 + boost *float64 +} + +// Creates a new boosting query. +func NewBoostingQuery() *BoostingQuery { + return &BoostingQuery{} +} + +func (q *BoostingQuery) Positive(positive Query) *BoostingQuery { + q.positiveClause = positive + return q +} + +func (q *BoostingQuery) Negative(negative Query) *BoostingQuery { + q.negativeClause = negative + return q +} + +func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery { + q.negativeBoost = &negativeBoost + return q +} + +func (q *BoostingQuery) Boost(boost float64) *BoostingQuery { + q.boost = &boost + return q +} + +// Creates the query source for the boosting query. +func (q *BoostingQuery) Source() (interface{}, error) { + // { + // "boosting" : { + // "positive" : { + // "term" : { + // "field1" : "value1" + // } + // }, + // "negative" : { + // "term" : { + // "field2" : "value2" + // } + // }, + // "negative_boost" : 0.2 + // } + // } + + query := make(map[string]interface{}) + + boostingClause := make(map[string]interface{}) + query["boosting"] = boostingClause + + // Negative and positive clause as well as negative boost + // are mandatory in the Java client. 
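+	// This Go implementation is more lenient: it serializes whichever
+	// of these clauses have actually been set and omits the rest.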
+ + // positive + if q.positiveClause != nil { + src, err := q.positiveClause.Source() + if err != nil { + return nil, err + } + boostingClause["positive"] = src + } + + // negative + if q.negativeClause != nil { + src, err := q.negativeClause.Source() + if err != nil { + return nil, err + } + boostingClause["negative"] = src + } + + if q.negativeBoost != nil { + boostingClause["negative_boost"] = *q.negativeBoost + } + + if q.boost != nil { + boostingClause["boost"] = *q.boost + } + + return query, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting_test.go new file mode 100644 index 000000000..0ef03dfef --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting_test.go @@ -0,0 +1,30 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBoostingQuery(t *testing.T) { + q := NewBoostingQuery() + q = q.Positive(NewTermQuery("tag", "wow")) + q = q.Negative(NewRangeQuery("age").From(10).To(20)) + q = q.NegativeBoost(0.2) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"boosting":{"negative":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"negative_boost":0.2,"positive":{"term":{"tag":"wow"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms.go new file mode 100644 index 000000000..d45825067 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms.go @@ -0,0 +1,146 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CommonTermsQuery is a modern alternative to stopwords +// which improves the precision and recall of search results +// (by taking stopwords into account), without sacrificing performance. +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-common-terms-query.html +type CommonTermsQuery struct { + Query + name string + text interface{} + cutoffFreq *float64 + highFreq *float64 + highFreqOp string + highFreqMinimumShouldMatch string + lowFreq *float64 + lowFreqOp string + lowFreqMinimumShouldMatch string + analyzer string + boost *float64 + disableCoord *bool + queryName string +} + +// NewCommonTermsQuery creates and initializes a new common terms query. 
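+//
+// A short usage sketch, mirroring the test for this query (the field
+// name and query text are illustrative):
+//
+//	q := NewCommonTermsQuery("message", "Golang").CutoffFrequency(0.001)
+//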
+func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery { + return &CommonTermsQuery{name: name, text: text} +} + +func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery { + q.cutoffFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery { + q.highFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery { + q.highFreqOp = op + return q +} + +func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.highFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery { + q.lowFreq = &f + return q +} + +func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery { + q.lowFreqOp = op + return q +} + +func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.lowFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery { + q.analyzer = analyzer + return q +} + +func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery { + q.boost = &boost + return q +} + +func (q *CommonTermsQuery) DisableCoord(disableCoord bool) *CommonTermsQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the common query. +func (q *CommonTermsQuery) Source() (interface{}, error) { + // { + // "common": { + // "body": { + // "query": "this is bonsai cool", + // "cutoff_frequency": 0.001 + // } + // } + // } + source := make(map[string]interface{}) + body := make(map[string]interface{}) + query := make(map[string]interface{}) + + source["common"] = body + body[q.name] = query + query["query"] = q.text + + if q.cutoffFreq != nil { + query["cutoff_frequency"] = *q.cutoffFreq + } + if q.highFreq != nil { + query["high_freq"] = *q.highFreq + } + if q.highFreqOp != "" { + query["high_freq_operator"] = q.highFreqOp + } + if q.lowFreq != nil { + query["low_freq"] = *q.lowFreq + } + if q.lowFreqOp != "" { + query["low_freq_operator"] = q.lowFreqOp + } + if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" { + mm := make(map[string]interface{}) + if q.lowFreqMinimumShouldMatch != "" { + mm["low_freq"] = q.lowFreqMinimumShouldMatch + } + if q.highFreqMinimumShouldMatch != "" { + mm["high_freq"] = q.highFreqMinimumShouldMatch + } + query["minimum_should_match"] = mm + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.disableCoord != nil { + query["disable_coord"] = *q.disableCoord + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms_test.go new file mode 100644 index 000000000..02c1c2b60 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms_test.go @@ -0,0 +1,84 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+func TestCommonTermsQuery(t *testing.T) {
+	q := NewCommonTermsQuery("message", "Golang").CutoffFrequency(0.001)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"common":{"message":{"cutoff_frequency":0.001,"query":"Golang"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchQueriesCommonTermsQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Common terms query
+	q := NewCommonTermsQuery("message", "Golang")
+	searchResult, err := client.Search().Index(testIndexName).Query(q).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 1 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 1 {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+	}
+
+	for _, hit := range searchResult.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		item := make(map[string]interface{})
+		err := json.Unmarshal(*hit.Source, &item)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score.go
new file mode 100644
index 000000000..c754d279d
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ConstantScoreQuery is a query that wraps a filter and simply returns
+// a constant score equal to the query boost for every document in the filter.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-constant-score-query.html
+type ConstantScoreQuery struct {
+	filter Query
+	boost  *float64
+}
+
+// NewConstantScoreQuery creates and initializes a new constant score query.
+func NewConstantScoreQuery(filter Query) *ConstantScoreQuery {
+	return &ConstantScoreQuery{
+		filter: filter,
+	}
+}
+
+// Boost sets the boost for this query. Documents matching this query
+// will (in addition to the normal weightings) have their score multiplied
+// by the boost provided.
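+//
+// For example (mirroring this package's test; values are illustrative):
+//
+//	q := NewConstantScoreQuery(NewTermQuery("user", "kimchy")).Boost(1.2)
+//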
+func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery { + q.boost = &boost + return q +} + +// Source returns the query source. +func (q *ConstantScoreQuery) Source() (interface{}, error) { + // "constant_score" : { + // "filter" : { + // .... + // }, + // "boost" : 1.5 + // } + + query := make(map[string]interface{}) + + params := make(map[string]interface{}) + query["constant_score"] = params + + // filter + src, err := q.filter.Source() + if err != nil { + return nil, err + } + params["filter"] = src + + // boost + if q.boost != nil { + params["boost"] = *q.boost + } + + return query, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score_test.go new file mode 100644 index 000000000..bdcce659c --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestConstantScoreQuery(t *testing.T) { + q := NewConstantScoreQuery(NewTermQuery("user", "kimchy")).Boost(1.2) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"constant_score":{"boost":1.2,"filter":{"term":{"user":"kimchy"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max.go new file mode 100644 index 000000000..c47d6bb12 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max.go @@ -0,0 +1,104 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DisMaxQuery is a query that generates the union of documents produced by +// its subqueries, and that scores each document with the maximum score +// for that document as produced by any subquery, plus a tie breaking +// increment for any additional matching subqueries. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-dis-max-query.html +type DisMaxQuery struct { + queries []Query + boost *float64 + tieBreaker *float64 + queryName string +} + +// NewDisMaxQuery creates and initializes a new dis max query. +func NewDisMaxQuery() *DisMaxQuery { + return &DisMaxQuery{ + queries: make([]Query, 0), + } +} + +// Query adds one or more queries to the dis max query. +func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery { + q.queries = append(q.queries, queries...) + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. +func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery { + q.boost = &boost + return q +} + +// TieBreaker is the factor by which the score of each non-maximum disjunct +// for a document is multiplied with and added into the final score. 
+//
+// If non-zero, the value should be small, on the order of 0.1, which says
+// that 10 occurrences of word in a lower-scored field that is also in a
+// higher scored field is just as good as a unique word in the lower scored
+// field (i.e., one that is not in any higher scored field).
+func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery {
+	q.tieBreaker = &tieBreaker
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched filters per hit.
+func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns the JSON serializable content for this query.
+func (q *DisMaxQuery) Source() (interface{}, error) {
+	// {
+	//  "dis_max" : {
+	//      "tie_breaker" : 0.7,
+	//      "boost" : 1.2,
+	//      "queries" : [
+	//          {
+	//              "term" : { "age" : 34 }
+	//          },
+	//          {
+	//              "term" : { "age" : 35 }
+	//          }
+	//      ]
+	//  }
+	// }
+
+	query := make(map[string]interface{})
+	params := make(map[string]interface{})
+	query["dis_max"] = params
+
+	if q.tieBreaker != nil {
+		params["tie_breaker"] = *q.tieBreaker
+	}
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+
+	// queries
+	clauses := make([]interface{}, 0)
+	for _, subQuery := range q.queries {
+		src, err := subQuery.Source()
+		if err != nil {
+			return nil, err
+		}
+		clauses = append(clauses, src)
+	}
+	params["queries"] = clauses
+
+	return query, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max_test.go
new file mode 100644
index 000000000..8b005a61e
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max_test.go
@@ -0,0 +1,28 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestDisMaxQuery(t *testing.T) {
+	q := NewDisMaxQuery()
+	q = q.Query(NewTermQuery("age", 34), NewTermQuery("age", 35)).Boost(1.2).TieBreaker(0.7)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"dis_max":{"boost":1.2,"queries":[{"term":{"age":34}},{"term":{"age":35}}],"tie_breaker":0.7}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists.go
new file mode 100644
index 000000000..e117673bd
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists.go
@@ -0,0 +1,49 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExistsQuery is a query that matches only documents in which
+// the field has a value.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-exists-query.html
+type ExistsQuery struct {
+	name      string
+	queryName string
+}
+
+// NewExistsQuery creates and initializes a new exists query.
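+//
+// For example:
+//
+//	q := NewExistsQuery("user")
+//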
+func NewExistsQuery(name string) *ExistsQuery { + return &ExistsQuery{ + name: name, + } +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched queries per hit. +func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable content for this query. +func (q *ExistsQuery) Source() (interface{}, error) { + // { + // "exists" : { + // "field" : "user" + // } + // } + + query := make(map[string]interface{}) + params := make(map[string]interface{}) + query["exists"] = params + + params["field"] = q.name + if q.queryName != "" { + params["_name"] = q.queryName + } + + return query, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists_test.go new file mode 100644 index 000000000..a1112085c --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestExistsQuery(t *testing.T) { + q := NewExistsQuery("user") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"exists":{"field":"user"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq.go new file mode 100644 index 000000000..b7fa15e67 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq.go @@ -0,0 +1,172 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FunctionScoreQuery allows you to modify the score of documents that +// are retrieved by a query. This can be useful if, for example, +// a score function is computationally expensive and it is sufficient +// to compute the score on a filtered set of documents. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +type FunctionScoreQuery struct { + query Query + filter Query + boost *float64 + maxBoost *float64 + scoreMode string + boostMode string + filters []Query + scoreFuncs []ScoreFunction + minScore *float64 + weight *float64 +} + +// NewFunctionScoreQuery creates and initializes a new function score query. +func NewFunctionScoreQuery() *FunctionScoreQuery { + return &FunctionScoreQuery{ + filters: make([]Query, 0), + scoreFuncs: make([]ScoreFunction, 0), + } +} + +// Query sets the query for the function score query. +func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery { + q.query = query + q.filter = nil + return q +} + +// Filter sets the filter for the function score query. 
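+// Setting a filter clears any query previously set via Query (and vice
+// versa); only one of the two is serialized into the final source.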
+func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery { + q.query = nil + q.filter = filter + return q +} + +// Add adds a score function that will execute on all the documents +// matching the filter. +func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, filter) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// AddScoreFunc adds a score function that will execute the function on all documents. +func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, nil) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// ScoreMode defines how results of individual score functions will be aggregated. +// Can be first, avg, max, sum, min, or multiply. +func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery { + q.scoreMode = scoreMode + return q +} + +// BoostMode defines how the combined result of score functions will +// influence the final score together with the sub query score. +func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery { + q.boostMode = boostMode + return q +} + +// MaxBoost is the maximum boost that will be applied by function score. +func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery { + q.maxBoost = &maxBoost + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by the +// boost provided. +func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery { + q.boost = &boost + return q +} + +// MinScore sets the minimum score. +func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery { + q.minScore = &minScore + return q +} + +// Source returns JSON for the function score query. +func (q *FunctionScoreQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["function_score"] = query + + if q.query != nil { + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + } else if q.filter != nil { + src, err := q.filter.Source() + if err != nil { + return nil, err + } + query["filter"] = src + } + + if len(q.filters) == 1 && q.filters[0] == nil { + // Weight needs to be serialized on this level. + if weight := q.scoreFuncs[0].GetWeight(); weight != nil { + query["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[0].Source() + if err != nil { + return nil, err + } + query[q.scoreFuncs[0].Name()] = src + } else { + funcs := make([]interface{}, len(q.filters)) + for i, filter := range q.filters { + hsh := make(map[string]interface{}) + if filter != nil { + src, err := filter.Source() + if err != nil { + return nil, err + } + hsh["filter"] = src + } + // Weight needs to be serialized on this level. 
+			if weight := q.scoreFuncs[i].GetWeight(); weight != nil {
+				hsh["weight"] = weight
+			}
+			// Serialize the score function
+			src, err := q.scoreFuncs[i].Source()
+			if err != nil {
+				return nil, err
+			}
+			hsh[q.scoreFuncs[i].Name()] = src
+			funcs[i] = hsh
+		}
+		query["functions"] = funcs
+	}
+
+	if q.scoreMode != "" {
+		query["score_mode"] = q.scoreMode
+	}
+	if q.boostMode != "" {
+		query["boost_mode"] = q.boostMode
+	}
+	if q.maxBoost != nil {
+		query["max_boost"] = *q.maxBoost
+	}
+	if q.boost != nil {
+		query["boost"] = *q.boost
+	}
+	if q.minScore != nil {
+		query["min_score"] = *q.minScore
+	}
+
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_score_funcs.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_score_funcs.go
new file mode 100644
index 000000000..fbce3577d
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_score_funcs.go
@@ -0,0 +1,567 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"strings"
+)
+
+// ScoreFunction is used in combination with the Function Score Query.
+type ScoreFunction interface {
+	Name() string
+	GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery
+	Source() (interface{}, error)
+}
+
+// -- Exponential Decay --
+
+// ExponentialDecayFunction builds an exponential decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type ExponentialDecayFunction struct {
+	fieldName      string
+	origin         interface{}
+	scale          interface{}
+	decay          *float64
+	offset         interface{}
+	multiValueMode string
+	weight         *float64
+}
+
+// NewExponentialDecayFunction creates a new ExponentialDecayFunction.
+func NewExponentialDecayFunction() *ExponentialDecayFunction {
+	return &ExponentialDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *ExponentialDecayFunction) Name() string {
+	return "exp"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied.
+func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction {
+	fn.fieldName = fieldName
+	return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction {
+	fn.origin = origin
+	return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction {
+	fn.scale = scale
+	return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction {
+	fn.decay = &decay
+	return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction {
+	fn.offset = offset
+	return fn
+}
+
+// Weight adjusts the score of the score function.
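+// Note that the weight is not part of this function's own Source output;
+// like all score functions here, it is serialized by FunctionScoreQuery
+// itself (see GetWeight).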
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction {
+	fn.weight = &weight
+	return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn *ExponentialDecayFunction) GetWeight() *float64 {
+	return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction {
+	fn.multiValueMode = mode
+	return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn *ExponentialDecayFunction) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source[fn.fieldName] = params
+	if fn.origin != nil {
+		params["origin"] = fn.origin
+	}
+	params["scale"] = fn.scale
+	if fn.decay != nil && *fn.decay > 0 {
+		params["decay"] = *fn.decay
+	}
+	if fn.offset != nil {
+		params["offset"] = fn.offset
+	}
+	if fn.multiValueMode != "" {
+		source["multi_value_mode"] = fn.multiValueMode
+	}
+	return source, nil
+}
+
+// -- Gauss Decay --
+
+// GaussDecayFunction builds a gauss decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type GaussDecayFunction struct {
+	fieldName      string
+	origin         interface{}
+	scale          interface{}
+	decay          *float64
+	offset         interface{}
+	multiValueMode string
+	weight         *float64
+}
+
+// NewGaussDecayFunction returns a new GaussDecayFunction.
+func NewGaussDecayFunction() *GaussDecayFunction {
+	return &GaussDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *GaussDecayFunction) Name() string {
+	return "gauss"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied.
+func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction {
+	fn.fieldName = fieldName
+	return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction {
+	fn.origin = origin
+	return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction {
+	fn.scale = scale
+	return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction {
+	fn.decay = &decay
+	return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction {
+	fn.offset = offset
+	return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
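+//
+// A hypothetical sketch (field name and values are illustrative, in the
+// style of the Elasticsearch reference):
+//
+//	fn := NewGaussDecayFunction().
+//		FieldName("location").
+//		Origin("11, 12").
+//		Scale("2km").
+//		Weight(2)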
+func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *GaussDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *GaussDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Linear Decay -- + +// LinearDecayFunction builds a linear decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type LinearDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewLinearDecayFunction initializes and returns a new LinearDecayFunction. +func NewLinearDecayFunction() *LinearDecayFunction { + return &LinearDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *LinearDecayFunction) Name() string { + return "linear" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. 
+// Returns nil if weight is not specified. +func (fn *LinearDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction { + fn.multiValueMode = mode + return fn +} + +// GetMultiValueMode returns how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) GetMultiValueMode() string { + return fn.multiValueMode +} + +// Source returns the serializable JSON data of this score function. +func (fn *LinearDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Script -- + +// ScriptFunction builds a script score function. It uses a script to +// compute or influence the score of documents that match with the inner +// query or filter. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_script_score +// for details. +type ScriptFunction struct { + script *Script + weight *float64 +} + +// NewScriptFunction initializes and returns a new ScriptFunction. +func NewScriptFunction(script *Script) *ScriptFunction { + return &ScriptFunction{ + script: script, + } +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ScriptFunction) Name() string { + return "script_score" +} + +// Script specifies the script to be executed. +func (fn *ScriptFunction) Script(script *Script) *ScriptFunction { + fn.script = script + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *ScriptFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *ScriptFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.script != nil { + src, err := fn.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Field value factor -- + +// FieldValueFactorFunction is a function score function that allows you +// to use a field from a document to influence the score. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_field_value_factor. 
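Before the field_value_factor implementation below, a minimal sketch of how the decay builders above compose with FunctionScoreQuery, written as package-internal code in the same style as the vendored tests further down. NewTermQuery and NewFunctionScoreQuery come from elsewhere in this vendored package (both are exercised in the tests), and the field names and distances are illustrative only:

    func exampleLinearDecay() (interface{}, error) {
        // Prefer documents close to the origin; the score falls to 0.5 at 5km.
        fn := NewLinearDecayFunction().
            FieldName("pin.location").
            Origin("52.52, 13.40").
            Scale("5km").
            Decay(0.5).
            Weight(2) // hoisted into the "functions" entry by FunctionScoreQuery, not by Source
        return NewFunctionScoreQuery().
            Query(NewTermQuery("city", "berlin")).
            AddScoreFunc(fn).
            Source()
    }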
+type FieldValueFactorFunction struct { + field string + factor *float64 + missing *float64 + weight *float64 + modifier string +} + +// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction. +func NewFieldValueFactorFunction() *FieldValueFactorFunction { + return &FieldValueFactorFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *FieldValueFactorFunction) Name() string { + return "field_value_factor" +} + +// Field is the field to be extracted from the document. +func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction { + fn.field = field + return fn +} + +// Factor is the (optional) factor to multiply the field with. If you do not +// specify a factor, the default is 1. +func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction { + fn.factor = &factor + return fn +} + +// Modifier to apply to the field value. It can be one of: none, log, log1p, +// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none. +func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction { + fn.modifier = modifier + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *FieldValueFactorFunction) GetWeight() *float64 { + return fn.weight +} + +// Missing is used if a document does not have that field. +func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction { + fn.missing = &missing + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *FieldValueFactorFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.field != "" { + source["field"] = fn.field + } + if fn.factor != nil { + source["factor"] = *fn.factor + } + if fn.missing != nil { + source["missing"] = *fn.missing + } + if fn.modifier != "" { + source["modifier"] = strings.ToLower(fn.modifier) + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Weight Factor -- + +// WeightFactorFunction builds a weight factor function that multiplies +// the weight to the score. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_weight +// for details. +type WeightFactorFunction struct { + weight float64 +} + +// NewWeightFactorFunction initializes and returns a new WeightFactorFunction. +func NewWeightFactorFunction(weight float64) *WeightFactorFunction { + return &WeightFactorFunction{weight: weight} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *WeightFactorFunction) Name() string { + return "weight" +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. 
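A sketch of the field_value_factor builder above in use (the weight-factor implementation continues below). The "likes" field is illustrative, and "log1p" is one of the modifiers listed in the Modifier doc comment:

    func exampleFieldValueFactor() (interface{}, error) {
        // Multiply the score by log1p(2 * likes); documents without the field count as 1.
        fn := NewFieldValueFactorFunction().
            Field("likes").
            Factor(2).
            Modifier("log1p").
            Missing(1)
        return NewFunctionScoreQuery().
            Query(NewTermQuery("type", "post")).
            AddScoreFunc(fn).
            BoostMode("multiply").
            Source()
    }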
+func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction { + fn.weight = weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *WeightFactorFunction) GetWeight() *float64 { + return &fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *WeightFactorFunction) Source() (interface{}, error) { + // Notice that the weight has to be serialized in FunctionScoreQuery. + return fn.weight, nil +} + +// -- Random -- + +// RandomFunction builds a random score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_random +// for details. +type RandomFunction struct { + seed interface{} + weight *float64 +} + +// NewRandomFunction initializes and returns a new RandomFunction. +func NewRandomFunction() *RandomFunction { + return &RandomFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *RandomFunction) Name() string { + return "random_score" +} + +// Seed is documented in 1.6 as a numeric value. However, in the source code +// of the Java client, it also accepts strings. So we accept both here, too. +func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction { + fn.seed = seed + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *RandomFunction) Weight(weight float64) *RandomFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *RandomFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *RandomFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.seed != nil { + source["seed"] = fn.seed + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_test.go new file mode 100644 index 000000000..59f1cd191 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_test.go @@ -0,0 +1,166 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFunctionScoreQuery(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + Add(NewTermQuery("name.last", "banon"), NewWeightFactorFunction(1.5)). + AddScoreFunc(NewWeightFactorFunction(3)). + AddScoreFunc(NewRandomFunction()). + Boost(3). + MaxBoost(10). 
+ ScoreMode("avg") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":3,"functions":[{"filter":{"term":{"name.last":"banon"}},"weight":1.5},{"weight":3},{"random_score":{}}],"max_boost":10,"query":{"term":{"name.last":"banon"}},"score_mode":"avg"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithNilFilter(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("tag", "wow")). + AddScoreFunc(NewRandomFunction()). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","max_boost":12,"query":{"term":{"tag":"wow"}},"random_score":{},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactor(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income")). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactorWithWeight(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max","weight":2.5}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactorWithMultipleScoreFuncsAndWeights(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)). + AddScoreFunc(NewScriptFunction(NewScript("_score * doc['my_numeric_field'].value")).Weight(1.25)). + AddScoreFunc(NewWeightFactorFunction(0.5)). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). 
+ ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":"_score * doc['my_numeric_field'].value"},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithGaussScoreFunc(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"gauss":{"pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithGaussScoreFuncAndMultiValueMode(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33).MultiValueMode("avg")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"gauss":{"multi_value_mode":"avg","pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy.go new file mode 100644 index 000000000..da79dc7e6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy.go @@ -0,0 +1,120 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FuzzyQuery uses similarity based on Levenshtein edit distance for +// string fields, and a +/- margin on numeric and date fields. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html +type FuzzyQuery struct { + name string + value interface{} + boost *float64 + fuzziness interface{} + prefixLength *int + maxExpansions *int + transpositions *bool + rewrite string + queryName string +} + +// NewFuzzyQuery creates a new fuzzy query. +func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery { + q := &FuzzyQuery{ + name: name, + value: value, + } + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. 
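A sketch of the fuzzy builder whose setters follow below; the values mirror the JSON shape quoted in its Source comment, and the field name is illustrative:

    func exampleFuzzy() (interface{}, error) {
        // "ki" matches terms within Levenshtein distance 2, e.g. "kim" or "king".
        q := NewFuzzyQuery("user", "ki").
            Fuzziness(2).
            PrefixLength(0).
            MaxExpansions(50)
        return q.Source()
    }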
+func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery { + q.boost = &boost + return q +} + +// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings +// like "auto", "0..1", "1..4" or "0.0..1.0". +func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery { + q.prefixLength = &prefixLength + return q +} + +func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery { + q.maxExpansions = &maxExpansions + return q +} + +func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery { + q.transpositions = &transpositions + return q +} + +func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *FuzzyQuery) Source() (interface{}, error) { + // { + // "fuzzy" : { + // "user" : { + // "value" : "ki", + // "boost" : 1.0, + // "fuzziness" : 2, + // "prefix_length" : 0, + // "max_expansions" : 100 + // } + // } + + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["fuzzy"] = query + + fq := make(map[string]interface{}) + query[q.name] = fq + + fq["value"] = q.value + + if q.boost != nil { + fq["boost"] = *q.boost + } + if q.transpositions != nil { + fq["transpositions"] = *q.transpositions + } + if q.fuzziness != nil { + fq["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + fq["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + fq["max_expansions"] = *q.maxExpansions + } + if q.rewrite != "" { + fq["rewrite"] = q.rewrite + } + if q.queryName != "" { + fq["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy_test.go new file mode 100644 index 000000000..fbbfe2f94 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFuzzyQuery(t *testing.T) { + q := NewFuzzyQuery("user", "ki").Boost(1.5).Fuzziness(2).PrefixLength(0).MaxExpansions(100) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fuzzy":{"user":{"boost":1.5,"fuzziness":2,"max_expansions":100,"prefix_length":0,"value":"ki"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box.go new file mode 100644 index 000000000..808ce82df --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box.go @@ -0,0 +1,121 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// GeoBoundingBoxQuery allows to filter hits based on a point location using +// a bounding box. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-bounding-box-query.html +type GeoBoundingBoxQuery struct { + name string + top *float64 + left *float64 + bottom *float64 + right *float64 + typ string + queryName string +} + +// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery. +func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery { + return &GeoBoundingBoxQuery{ + name: name, + } +} + +func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery { + q.top = &top + q.left = &left + return q +} + +func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.TopLeft(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery { + q.bottom = &bottom + q.right = &right + return q +} + +func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.BottomRight(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery { + q.bottom = &bottom + q.left = &left + return q +} + +func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.BottomLeft(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery { + q.top = &top + q.right = &right + return q +} + +func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.TopRight(point.Lat, point.Lon) +} + +// Type sets the type of executing the geo bounding box. It can be either +// memory or indexed. It defaults to memory. +func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery { + q.typ = typ + return q +} + +func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoBoundingBoxQuery) Source() (interface{}, error) { + // { + // "geo_bbox" : { + // ... 
+ // } + // } + + if q.top == nil { + return nil, errors.New("geo_bounding_box requires top latitude to be set") + } + if q.bottom == nil { + return nil, errors.New("geo_bounding_box requires bottom latitude to be set") + } + if q.right == nil { + return nil, errors.New("geo_bounding_box requires right longitude to be set") + } + if q.left == nil { + return nil, errors.New("geo_bounding_box requires left longitude to be set") + } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["geo_bbox"] = params + + box := make(map[string]interface{}) + box["top_left"] = []float64{*q.left, *q.top} + box["bottom_right"] = []float64{*q.right, *q.bottom} + params[q.name] = box + + if q.typ != "" { + params["type"] = q.typ + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go new file mode 100644 index 000000000..6b15885ca --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go @@ -0,0 +1,63 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoBoundingBoxQueryIncomplete(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeft(40.73, -74.1) + // no bottom and no right here + q = q.Type("memory") + src, err := q.Source() + if err == nil { + t.Fatal("expected error") + } + if src != nil { + t.Fatal("expected empty source") + } +} + +func TestGeoBoundingBoxQuery(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeft(40.73, -74.1) + q = q.BottomRight(40.01, -71.12) + q = q.Type("memory") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]},"type":"memory"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundingBoxQueryWithGeoPoint(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeftFromGeoPoint(GeoPointFromLatLon(40.73, -74.1)) + q = q.BottomRightFromGeoPoint(GeoPointFromLatLon(40.01, -71.12)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance.go new file mode 100644 index 000000000..c1eed8521 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance.go @@ -0,0 +1,116 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
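A sketch of the bounding-box builder above; note that Source returns an error unless all four edges are set, which the incomplete-query test above exercises. Coordinates are illustrative:

    func exampleBoundingBox() (interface{}, error) {
        // Keep hits whose pin.location lies inside the box spanned by the two corners.
        q := NewGeoBoundingBoxQuery("pin.location").
            TopLeft(40.73, -74.1).
            BottomRight(40.01, -71.12)
        return q.Source()
    }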
+ +package elastic + +// GeoDistanceQuery filters documents that include only hits that exists +// within a specific distance from a geo point. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-query.html +type GeoDistanceQuery struct { + name string + distance string + lat float64 + lon float64 + geohash string + distanceType string + optimizeBbox string + queryName string +} + +// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery. +func NewGeoDistanceQuery(name string) *GeoDistanceQuery { + return &GeoDistanceQuery{name: name} +} + +func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery { + q.lat = point.Lat + q.lon = point.Lon + return q +} + +func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery { + q.lat = lat + q.lon = lon + return q +} + +func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery { + q.lat = lat + return q +} + +func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery { + q.lon = lon + return q +} + +func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery { + q.geohash = geohash + return q +} + +func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery { + q.distance = distance + return q +} + +func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery { + q.distanceType = distanceType + return q +} + +func (q *GeoDistanceQuery) OptimizeBbox(optimizeBbox string) *GeoDistanceQuery { + q.optimizeBbox = optimizeBbox + return q +} + +func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoDistanceQuery) Source() (interface{}, error) { + // { + // "geo_distance" : { + // "distance" : "200km", + // "pin.location" : { + // "lat" : 40, + // "lon" : -70 + // } + // } + // } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + + if q.geohash != "" { + params[q.name] = q.geohash + } else { + location := make(map[string]interface{}) + location["lat"] = q.lat + location["lon"] = q.lon + params[q.name] = location + } + + if q.distance != "" { + params["distance"] = q.distance + } + if q.distanceType != "" { + params["distance_type"] = q.distanceType + } + if q.optimizeBbox != "" { + params["optimize_bbox"] = q.optimizeBbox + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + source["geo_distance"] = params + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance_test.go new file mode 100644 index 000000000..f0b8ca654 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance_test.go @@ -0,0 +1,70 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
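A sketch of the geo-distance builder defined above; Point could be replaced by GeoHash, as the geohash test below shows, and the coordinates and distance are illustrative:

    func exampleGeoDistance() (interface{}, error) {
        // Keep hits within 200km of the given point.
        q := NewGeoDistanceQuery("pin.location").
            Point(40, -70).
            Distance("200km")
        return q.Source()
    }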
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoDistanceQuery(t *testing.T) { + q := NewGeoDistanceQuery("pin.location") + q = q.Lat(40) + q = q.Lon(-70) + q = q.Distance("200km") + q = q.DistanceType("plane") + q = q.OptimizeBbox("memory") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","optimize_bbox":"memory","pin.location":{"lat":40,"lon":-70}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceQueryWithGeoPoint(t *testing.T) { + q := NewGeoDistanceQuery("pin.location") + q = q.GeoPoint(GeoPointFromLatLon(40, -70)) + q = q.Distance("200km") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceQueryWithGeoHash(t *testing.T) { + q := NewGeoDistanceQuery("pin.location") + q = q.GeoHash("drm3btev3e86") + q = q.Distance("12km") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"distance":"12km","pin.location":"drm3btev3e86"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon.go new file mode 100644 index 000000000..b08d7078a --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon.go @@ -0,0 +1,72 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoPolygonQuery allows to include hits that only fall within a polygon of points. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-polygon-query.html +type GeoPolygonQuery struct { + name string + points []*GeoPoint + queryName string +} + +// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery. +func NewGeoPolygonQuery(name string) *GeoPolygonQuery { + return &GeoPolygonQuery{ + name: name, + points: make([]*GeoPoint, 0), + } +} + +// AddPoint adds a point from latitude and longitude. +func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery { + q.points = append(q.points, GeoPointFromLatLon(lat, lon)) + return q +} + +// AddGeoPoint adds a GeoPoint. +func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery { + q.points = append(q.points, point) + return q +} + +func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
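A sketch of the polygon builder above (its Source method follows directly below); the three corners are illustrative:

    func exampleGeoPolygon() (interface{}, error) {
        // Keep hits that fall inside the triangle spanned by the three corners.
        q := NewGeoPolygonQuery("person.location").
            AddPoint(40, -70).
            AddPoint(30, -80).
            AddPoint(20, -90)
        return q.Source()
    }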
+func (q *GeoPolygonQuery) Source() (interface{}, error) { + // "geo_polygon" : { + // "person.location" : { + // "points" : [ + // {"lat" : 40, "lon" : -70}, + // {"lat" : 30, "lon" : -80}, + // {"lat" : 20, "lon" : -90} + // ] + // } + // } + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["geo_polygon"] = params + + polygon := make(map[string]interface{}) + params[q.name] = polygon + + points := make([]interface{}, 0) + for _, point := range q.points { + points = append(points, point.Source()) + } + polygon["points"] = points + + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon_test.go new file mode 100644 index 000000000..efe89a8d4 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon_test.go @@ -0,0 +1,58 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoPolygonQuery(t *testing.T) { + q := NewGeoPolygonQuery("person.location") + q = q.AddPoint(40, -70) + q = q.AddPoint(30, -80) + point, err := GeoPointFromString("20,-90") + if err != nil { + t.Fatalf("GeoPointFromString failed: %v", err) + } + q = q.AddGeoPoint(point) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoPolygonQueryFromGeoPoints(t *testing.T) { + q := NewGeoPolygonQuery("person.location") + q = q.AddGeoPoint(&GeoPoint{Lat: 40, Lon: -70}) + q = q.AddGeoPoint(GeoPointFromLatLon(30, -80)) + point, err := GeoPointFromString("20,-90") + if err != nil { + t.Fatalf("GeoPointFromString failed: %v", err) + } + q = q.AddGeoPoint(point) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child.go new file mode 100644 index 000000000..a8907546b --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasChildQuery accepts a query and the child type to run against, and results +// in parent documents that have child docs matching the query. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html +type HasChildQuery struct { + query Query + childType string + boost *float64 + scoreType string + minChildren *int + maxChildren *int + shortCircuitCutoff *int + queryName string + innerHit *InnerHit +} + +// NewHasChildQuery creates and initializes a new has_child query. +func NewHasChildQuery(childType string, query Query) *HasChildQuery { + return &HasChildQuery{ + query: query, + childType: childType, + } +} + +// Boost sets the boost for this query. +func (q *HasChildQuery) Boost(boost float64) *HasChildQuery { + q.boost = &boost + return q +} + +// ScoreType defines how the scores from the matching child documents +// are mapped into the parent document. +func (q *HasChildQuery) ScoreType(scoreType string) *HasChildQuery { + q.scoreType = scoreType + return q +} + +// MinChildren defines the minimum number of children that are required +// to match for the parent to be considered a match. +func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery { + q.minChildren = &minChildren + return q +} + +// MaxChildren defines the maximum number of children that are required +// to match for the parent to be considered a match. +func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery { + q.maxChildren = &maxChildren + return q +} + +// ShortCircuitCutoff configures what cut off point only to evaluate +// parent documents that contain the matching parent id terms instead +// of evaluating all parent docs. +func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery { + q.shortCircuitCutoff = &shortCircuitCutoff + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the function score query. 
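A sketch of the has_child builder above (Source follows directly below). The "comment" child type is illustrative; NewInnerHit comes from elsewhere in the vendored package and is exercised in the test below:

    func exampleHasChild() (interface{}, error) {
        // Parents with at least two matching "comment" children, returning them as inner hits.
        q := NewHasChildQuery("comment", NewTermQuery("tag", "something")).
            MinChildren(2).
            InnerHit(NewInnerHit().Name("comments"))
        return q.Source()
    }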
+func (q *HasChildQuery) Source() (interface{}, error) { + // { + // "has_child" : { + // "type" : "blog_tag", + // "query" : { + // "term" : { + // "tag" : "something" + // } + // } + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["has_child"] = query + + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + query["type"] = q.childType + if q.boost != nil { + query["boost"] = *q.boost + } + if q.scoreType != "" { + query["score_type"] = q.scoreType + } + if q.minChildren != nil { + query["min_children"] = *q.minChildren + } + if q.maxChildren != nil { + query["max_children"] = *q.maxChildren + } + if q.shortCircuitCutoff != nil { + query["short_circuit_cutoff"] = *q.shortCircuitCutoff + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child_test.go new file mode 100644 index 000000000..887b2e263 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child_test.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestHasChildQuery(t *testing.T) { + q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"has_child":{"query":{"term":{"tag":"something"}},"type":"blog_tag"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHasChildQueryWithInnerHit(t *testing.T) { + q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")) + q = q.InnerHit(NewInnerHit().Name("comments")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"has_child":{"inner_hits":{"name":"comments"},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent.go new file mode 100644 index 000000000..4db1dde7e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasParentQuery accepts a query and a parent type. The query is executed +// in the parent document space which is specified by the parent type. +// This query returns child documents which associated parents have matched. 
+// For the rest has_parent query has the same options and works in the +// same manner as has_child query. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-parent-query.html +type HasParentQuery struct { + query Query + parentType string + boost *float64 + scoreType string + queryName string + innerHit *InnerHit +} + +// NewHasParentQuery creates and initializes a new has_parent query. +func NewHasParentQuery(parentType string, query Query) *HasParentQuery { + return &HasParentQuery{ + query: query, + parentType: parentType, + } +} + +// Boost sets the boost for this query. +func (q *HasParentQuery) Boost(boost float64) *HasParentQuery { + q.boost = &boost + return q +} + +// ScoreType defines how the parent score is mapped into the child documents. +func (q *HasParentQuery) ScoreType(scoreType string) *HasParentQuery { + q.scoreType = scoreType + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the function score query. +func (q *HasParentQuery) Source() (interface{}, error) { + // { + // "has_parent" : { + // "parent_type" : "blog", + // "query" : { + // "term" : { + // "tag" : "something" + // } + // } + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["has_parent"] = query + + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + query["parent_type"] = q.parentType + if q.boost != nil { + query["boost"] = *q.boost + } + if q.scoreType != "" { + query["score_type"] = q.scoreType + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent_test.go new file mode 100644 index 000000000..b5daefda8 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
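A sketch of the has_parent builder defined above; the parent type and term mirror the example JSON in its Source comment:

    func exampleHasParent() (interface{}, error) {
        // Child documents whose "blog" parent matches the term query.
        q := NewHasParentQuery("blog", NewTermQuery("tag", "something"))
        return q.Source()
    }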
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestHasParentQueryTest(t *testing.T) { + q := NewHasParentQuery("blog", NewTermQuery("tag", "something")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids.go new file mode 100644 index 000000000..96f463dc6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids.go @@ -0,0 +1,76 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// IdsQuery filters documents that only have the provided ids. +// Note, this query uses the _uid field. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-ids-query.html +type IdsQuery struct { + types []string + values []string + boost *float64 + queryName string +} + +// NewIdsQuery creates and initializes a new ids query. +func NewIdsQuery(types ...string) *IdsQuery { + return &IdsQuery{ + types: types, + values: make([]string, 0), + } +} + +// Ids adds ids to the filter. +func (q *IdsQuery) Ids(ids ...string) *IdsQuery { + q.values = append(q.values, ids...) + return q +} + +// Boost sets the boost for this query. +func (q *IdsQuery) Boost(boost float64) *IdsQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter. +func (q *IdsQuery) QueryName(queryName string) *IdsQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *IdsQuery) Source() (interface{}, error) { + // { + // "ids" : { + // "type" : "my_type", + // "values" : ["1", "4", "100"] + // } + // } + + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["ids"] = query + + // type(s) + if len(q.types) == 1 { + query["type"] = q.types[0] + } else if len(q.types) > 1 { + query["types"] = q.types + } + + // values + query["values"] = q.values + + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids_test.go new file mode 100644 index 000000000..d1ff9a6b1 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
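A sketch of the ids builder defined above, mirroring the example JSON in its Source comment:

    func exampleIds() (interface{}, error) {
        // Restrict the search to three known document ids of type "my_type".
        q := NewIdsQuery("my_type").Ids("1", "4", "100")
        return q.Source()
    }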
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestIdsQuery(t *testing.T) { + q := NewIdsQuery("my_type").Ids("1", "4", "100").Boost(10.5).QueryName("my_query") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"ids":{"_name":"my_query","boost":10.5,"type":"my_type","values":["1","4","100"]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices.go new file mode 100644 index 000000000..56efab3dd --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices.go @@ -0,0 +1,89 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// IndicesQuery can be used when executed across multiple indices, allowing +// to have a query that executes only when executed on an index that matches +// a specific list of indices, and another query that executes when it is +// executed on an index that does not match the listed indices. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-indices-query.html +type IndicesQuery struct { + query Query + indices []string + noMatchQueryType string + noMatchQuery Query + queryName string +} + +// NewIndicesQuery creates and initializes a new indices query. +func NewIndicesQuery(query Query, indices ...string) *IndicesQuery { + return &IndicesQuery{ + query: query, + indices: indices, + } +} + +// NoMatchQuery sets the query to use when it executes on an index that +// does not match the indices provided. +func (q *IndicesQuery) NoMatchQuery(query Query) *IndicesQuery { + q.noMatchQuery = query + return q +} + +// NoMatchQueryType sets the no match query which can be either all or none. +func (q *IndicesQuery) NoMatchQueryType(typ string) *IndicesQuery { + q.noMatchQueryType = typ + return q +} + +// QueryName sets the query name for the filter. +func (q *IndicesQuery) QueryName(queryName string) *IndicesQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
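A sketch of the indices builder above (its Source method follows directly below); "none" is one of the two values the NoMatchQueryType doc comment permits:

    func exampleIndices() (interface{}, error) {
        // Run the term query on index1/index2 and match nothing on any other index.
        q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2").
            NoMatchQueryType("none")
        return q.Source()
    }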
+func (q *IndicesQuery) Source() (interface{}, error) {
+ // {
+ // "indices" : {
+ // "indices" : ["index1", "index2"],
+ // "query" : {
+ // "term" : { "tag" : "wow" }
+ // },
+ // "no_match_query" : {
+ // "term" : { "tag" : "kow" }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["indices"] = params
+
+ params["indices"] = q.indices
+
+ src, err := q.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ params["query"] = src
+
+ if q.noMatchQuery != nil {
+ src, err := q.noMatchQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ params["no_match_query"] = src
+ } else if q.noMatchQueryType != "" {
+ params["no_match_query"] = q.noMatchQueryType
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices_test.go
new file mode 100644
index 000000000..f011b9ac7
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIndicesQuery(t *testing.T) {
+ q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2")
+ q = q.NoMatchQuery(NewTermQuery("tag", "kow"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"indices":{"indices":["index1","index2"],"no_match_query":{"term":{"tag":"kow"}},"query":{"term":{"tag":"wow"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIndicesQueryWithNoMatchQueryType(t *testing.T) {
+ q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2")
+ q = q.NoMatchQueryType("all")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"indices":{"indices":["index1","index2"],"no_match_query":"all","query":{"term":{"tag":"wow"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match.go
new file mode 100644
index 000000000..b740b0f0d
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match.go
@@ -0,0 +1,214 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchQuery is a family of queries that accepts text/numerics/dates,
+// analyzes them, and constructs a query.
+//
+// To create a new MatchQuery, use NewMatchQuery. To create specific types
+// of queries, e.g. a match_phrase query, use NewMatchQuery(...).Type("phrase"),
+// or use one of the shortcuts e.g. NewMatchPhraseQuery(...).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html
+type MatchQuery struct {
+	name                string
+	text                interface{}
+	typ                 string // boolean, phrase, phrase_prefix
+	operator            string // or / and
+	analyzer            string
+	boost               *float64
+	slop                *int
+	fuzziness           string
+	prefixLength        *int
+	maxExpansions       *int
+	minimumShouldMatch  string
+	rewrite             string
+	fuzzyRewrite        string
+	lenient             *bool
+	fuzzyTranspositions *bool
+	zeroTermsQuery      string
+	cutoffFrequency     *float64
+	queryName           string
+}
+
+// NewMatchQuery creates and initializes a new MatchQuery.
+func NewMatchQuery(name string, text interface{}) *MatchQuery {
+	return &MatchQuery{name: name, text: text}
+}
+
+// NewMatchPhraseQuery creates and initializes a new MatchQuery of type phrase.
+func NewMatchPhraseQuery(name string, text interface{}) *MatchQuery {
+	return &MatchQuery{name: name, text: text, typ: "phrase"}
+}
+
+// NewMatchPhrasePrefixQuery creates and initializes a new MatchQuery of type phrase_prefix.
+func NewMatchPhrasePrefixQuery(name string, text interface{}) *MatchQuery {
+	return &MatchQuery{name: name, text: text, typ: "phrase_prefix"}
+}
+
+// Type can be "boolean", "phrase", or "phrase_prefix". Defaults to "boolean".
+func (q *MatchQuery) Type(typ string) *MatchQuery {
+	q.typ = typ
+	return q
+}
+
+// Operator sets the operator to use when using a boolean query.
+// Can be "AND" or "OR" (default).
+func (q *MatchQuery) Operator(operator string) *MatchQuery {
+	q.operator = operator
+	return q
+}
+
+// Analyzer explicitly sets the analyzer to use. It defaults to use explicit
+// mapping config for the field, or, if not set, the default search analyzer.
+func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// Boost sets the boost to apply to this query.
+func (q *MatchQuery) Boost(boost float64) *MatchQuery {
+	q.boost = &boost
+	return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MatchQuery) Slop(slop int) *MatchQuery {
+	q.slop = &slop
+	return q
+}
+
+// Fuzziness sets the fuzziness when evaluated to a fuzzy query type.
+// Defaults to "AUTO".
+func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery {
+	q.fuzziness = fuzziness
+	return q
+}
+
+// PrefixLength sets the length of the common (non-fuzzy) prefix for
+// fuzzy match queries.
+func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery {
+	q.prefixLength = &prefixLength
+	return q
+}
+
+// MaxExpansions is used with fuzzy or prefix type queries. It specifies
+// the number of term expansions to use. It defaults to unbounded, so it's
+// recommended to set it to a reasonable value for faster execution.
+func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery {
+	q.maxExpansions = &maxExpansions
+	return q
+}
+
+// CutoffFrequency can be a value in [0..1] (or an absolute number >=1).
+// It represents the maximum threshold of a term's document frequency to be
+// considered a low frequency term.
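+//
+// For instance, a sketch that treats terms occurring in more than 0.1%
+// of all documents as high-frequency (the value is illustrative):
+//
+//   q := NewMatchQuery("message", "to be or not to be").CutoffFrequency(0.001)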
+func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery {
+	q.cutoffFrequency = &cutoff
+	return q
+}
+
+// MinimumShouldMatch sets the optional minimum_should_match value to
+// apply to the match query.
+func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// Rewrite sets the rewrite method to use for multi-term queries.
+func (q *MatchQuery) Rewrite(rewrite string) *MatchQuery {
+	q.rewrite = rewrite
+	return q
+}
+
+// FuzzyRewrite sets the rewrite method to use for fuzzy queries.
+func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery {
+	q.fuzzyRewrite = fuzzyRewrite
+	return q
+}
+
+// FuzzyTranspositions specifies whether fuzzy matching includes
+// transpositions of two adjacent characters (ab -> ba).
+func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery {
+	q.fuzzyTranspositions = &fuzzyTranspositions
+	return q
+}
+
+// Lenient specifies whether format based failures will be ignored.
+func (q *MatchQuery) Lenient(lenient bool) *MatchQuery {
+	q.lenient = &lenient
+	return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery {
+	q.zeroTermsQuery = zeroTermsQuery
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *MatchQuery) QueryName(queryName string) *MatchQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns JSON for the match query.
+func (q *MatchQuery) Source() (interface{}, error) {
+	// {"match":{"name":{"query":"value","type":"boolean/phrase"}}}
+	source := make(map[string]interface{})
+
+	match := make(map[string]interface{})
+	source["match"] = match
+
+	query := make(map[string]interface{})
+	match[q.name] = query
+
+	query["query"] = q.text
+
+	if q.typ != "" {
+		query["type"] = q.typ
+	}
+	if q.operator != "" {
+		query["operator"] = q.operator
+	}
+	if q.analyzer != "" {
+		query["analyzer"] = q.analyzer
+	}
+	if q.boost != nil {
+		query["boost"] = *q.boost
+	}
+	if q.slop != nil {
+		query["slop"] = *q.slop
+	}
+	if q.fuzziness != "" {
+		query["fuzziness"] = q.fuzziness
+	}
+	if q.prefixLength != nil {
+		query["prefix_length"] = *q.prefixLength
+	}
+	if q.maxExpansions != nil {
+		query["max_expansions"] = *q.maxExpansions
+	}
+	if q.minimumShouldMatch != "" {
+		query["minimum_should_match"] = q.minimumShouldMatch
+	}
+	if q.rewrite != "" {
+		query["rewrite"] = q.rewrite
+	}
+	if q.fuzzyRewrite != "" {
+		query["fuzzy_rewrite"] = q.fuzzyRewrite
+	}
+	if q.lenient != nil {
+		query["lenient"] = *q.lenient
+	}
+	if q.fuzzyTranspositions != nil {
+		query["fuzzy_transpositions"] = *q.fuzzyTranspositions
+	}
+	if q.zeroTermsQuery != "" {
+		query["zero_terms_query"] = q.zeroTermsQuery
+	}
+	if q.cutoffFrequency != nil {
+		query["cutoff_frequency"] = *q.cutoffFrequency
+	}
+	if q.queryName != "" {
+		query["_name"] = q.queryName
+	}
+
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all.go
new file mode 100644
index 000000000..5b5ca590e
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchAllQuery is the simplest query. It matches all documents,
+// giving them all a _score of 1.0.
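+//
+// A short sketch, matching the test below:
+//
+//   q := NewMatchAllQuery().Boost(3.14)
+//   // marshals to {"match_all":{"boost":3.14}}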
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-match-all-query.html
+type MatchAllQuery struct {
+	boost *float64
+}
+
+// NewMatchAllQuery creates and initializes a new match all query.
+func NewMatchAllQuery() *MatchAllQuery {
+	return &MatchAllQuery{}
+}
+
+// Boost sets the boost for this query. Documents matching this query will
+// (in addition to the normal weightings) have their score multiplied by the
+// boost provided.
+func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery {
+	q.boost = &boost
+	return q
+}
+
+// Source returns JSON for the match_all query.
+func (q MatchAllQuery) Source() (interface{}, error) {
+	// {
+	//   "match_all" : { ... }
+	// }
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["match_all"] = params
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all_test.go
new file mode 100644
index 000000000..0dcebb1f6
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestMatchAllQuery(t *testing.T) {
+	q := NewMatchAllQuery()
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"match_all":{}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestMatchAllQueryWithBoost(t *testing.T) {
+	q := NewMatchAllQuery().Boost(3.14)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"match_all":{"boost":3.14}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_test.go
new file mode 100644
index 000000000..ade59351f
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_test.go
@@ -0,0 +1,78 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestMatchQuery(t *testing.T) {
+	q := NewMatchQuery("message", "this is a test")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"match":{"message":{"query":"this is a test"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestMatchPhraseQuery(t *testing.T) {
+	q := NewMatchPhraseQuery("message", "this is a test")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"match":{"message":{"query":"this is a test","type":"phrase"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestMatchPhrasePrefixQuery(t *testing.T) {
+	q := NewMatchPhrasePrefixQuery("message", "this is a test")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"match":{"message":{"query":"this is a test","type":"phrase_prefix"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestMatchQueryWithOptions(t *testing.T) {
+	q := NewMatchQuery("message", "this is a test").Analyzer("whitespace").Operator("or").Boost(2.5)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"match":{"message":{"analyzer":"whitespace","boost":2.5,"operator":"or","query":"this is a test"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing.go
new file mode 100644
index 000000000..0fff3f55c
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing.go
@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MissingQuery returns documents that have only null values or no value
+// in the original field.
+//
+// For details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-missing-query.html
+type MissingQuery struct {
+	name      string
+	queryName string
+	nullValue *bool
+	existence *bool
+}
+
+// NewMissingQuery creates and initializes a new MissingQuery.
+func NewMissingQuery(name string) *MissingQuery {
+	return &MissingQuery{name: name}
+}
+
+// QueryName sets the query name for the query that can be used when
+// searching for matched filters per hit.
+func (q *MissingQuery) QueryName(queryName string) *MissingQuery {
+	q.queryName = queryName
+	return q
+}
+
+// NullValue indicates whether the missing filter automatically includes
+// fields with null value configured in the mappings. Defaults to false.
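+//
+// For example, adapted from the test below:
+//
+//   q := NewMissingQuery("user").NullValue(true).Existence(true)
+//   // marshals to {"missing":{"existence":true,"field":"user","null_value":true}}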
+func (q *MissingQuery) NullValue(nullValue bool) *MissingQuery { + q.nullValue = &nullValue + return q +} + +// Existence indicates whether the missing filter includes documents where +// the field doesn't exist in the docs. +func (q *MissingQuery) Existence(existence bool) *MissingQuery { + q.existence = &existence + return q +} + +// Source returns JSON for the query. +func (q *MissingQuery) Source() (interface{}, error) { + // { + // "missing" : { + // "field" : "..." + // } + // } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["missing"] = params + params["field"] = q.name + if q.nullValue != nil { + params["null_value"] = *q.nullValue + } + if q.existence != nil { + params["existence"] = *q.existence + } + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing_test.go new file mode 100644 index 000000000..096b0b3cd --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMissingQuery(t *testing.T) { + q := NewMissingQuery("user") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"missing":{"field":"user"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMissingQueryWithParams(t *testing.T) { + q := NewMissingQuery("user").NullValue(true).Existence(true).QueryName("_my_query") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"missing":{"_name":"_my_query","existence":true,"field":"user","null_value":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this.go new file mode 100644 index 000000000..afce3f05c --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this.go @@ -0,0 +1,412 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// MoreLikeThis query (MLT Query) finds documents that are "like" a given +// set of documents. In order to do so, MLT selects a set of representative +// terms of these input documents, forms a query using these terms, executes +// the query and returns the results. The user controls the input documents, +// how the terms should be selected and how the query is formed. 
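+//
+// A minimal sketch that asks for documents similar to a free-text
+// snippet (the field name and text are illustrative):
+//
+//   q := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message")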
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html
+type MoreLikeThisQuery struct {
+	fields                 []string
+	docs                   []*MoreLikeThisQueryItem
+	unlikeDocs             []*MoreLikeThisQueryItem
+	include                *bool
+	minimumShouldMatch     string
+	minTermFreq            *int
+	maxQueryTerms          *int
+	stopWords              []string
+	minDocFreq             *int
+	maxDocFreq             *int
+	minWordLen             *int
+	maxWordLen             *int
+	boostTerms             *float64
+	boost                  *float64
+	analyzer               string
+	failOnUnsupportedField *bool
+	queryName              string
+}
+
+// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery.
+func NewMoreLikeThisQuery() *MoreLikeThisQuery {
+	return &MoreLikeThisQuery{
+		fields:     make([]string, 0),
+		stopWords:  make([]string, 0),
+		docs:       make([]*MoreLikeThisQueryItem, 0),
+		unlikeDocs: make([]*MoreLikeThisQueryItem, 0),
+	}
+}
+
+// Field adds one or more field names to the query.
+func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery {
+	q.fields = append(q.fields, fields...)
+	return q
+}
+
+// StopWord sets the stopwords. Any word in this set is considered
+// "uninteresting" and ignored. Even if your Analyzer allows stopwords,
+// you might want to tell the MoreLikeThis code to ignore them, as for
+// the purposes of document similarity it seems reasonable to assume that
+// "a stop word is never interesting".
+func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery {
+	q.stopWords = append(q.stopWords, stopWords...)
+	return q
+}
+
+// LikeText sets the text to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery {
+	for _, s := range likeTexts {
+		item := NewMoreLikeThisQueryItem().LikeText(s)
+		q.docs = append(q.docs, item)
+	}
+	return q
+}
+
+// LikeItems sets the documents to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
+	q.docs = append(q.docs, docs...)
+	return q
+}
+
+// IgnoreLikeText sets the text from which the terms should not be selected.
+func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery {
+	for _, s := range ignoreLikeText {
+		item := NewMoreLikeThisQueryItem().LikeText(s)
+		q.unlikeDocs = append(q.unlikeDocs, item)
+	}
+	return q
+}
+
+// IgnoreLikeItems sets the documents from which the terms should not be selected.
+func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
+	q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...)
+	return q
+}
+
+// Ids sets the document ids to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery {
+	for _, id := range ids {
+		item := NewMoreLikeThisQueryItem().Id(id)
+		q.docs = append(q.docs, item)
+	}
+	return q
+}
+
+// Include specifies whether the input documents should also be included
+// in the results returned. Defaults to false.
+func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery {
+	q.include = &include
+	return q
+}
+
+// MinimumShouldMatch sets the number of terms that must match the generated
+// query expressed in the common syntax for minimum should match.
+// The default value is "30%".
+//
+// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0.
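+//
+// For example, requiring at least half of the generated terms to match
+// (the percentage is illustrative):
+//
+//   q := NewMoreLikeThisQuery().LikeText("Golang topic").
+//       Field("message").MinimumShouldMatch("50%")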
+func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// MinTermFreq is the frequency below which terms will be ignored in the
+// source doc. The default frequency is 2.
+func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery {
+	q.minTermFreq = &minTermFreq
+	return q
+}
+
+// MaxQueryTerms sets the maximum number of query terms that will be included
+// in any generated query. It defaults to 25.
+func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery {
+	q.maxQueryTerms = &maxQueryTerms
+	return q
+}
+
+// MinDocFreq sets the frequency at which words will be ignored which do
+// not occur in at least this many docs. The default is 5.
+func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery {
+	q.minDocFreq = &minDocFreq
+	return q
+}
+
+// MaxDocFreq sets the maximum frequency for which words may still appear.
+// Words that appear in more than this many docs will be ignored.
+// It defaults to unbounded.
+func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery {
+	q.maxDocFreq = &maxDocFreq
+	return q
+}
+
+// MinWordLen sets the minimum word length below which words will be
+// ignored. It defaults to 0.
+func (q *MoreLikeThisQuery) MinWordLen(minWordLen int) *MoreLikeThisQuery {
+	q.minWordLen = &minWordLen
+	return q
+}
+
+// MaxWordLen sets the maximum word length above which words will be ignored.
+// Defaults to unbounded (0).
+func (q *MoreLikeThisQuery) MaxWordLen(maxWordLen int) *MoreLikeThisQuery {
+	q.maxWordLen = &maxWordLen
+	return q
+}
+
+// BoostTerms sets the boost factor to use when boosting terms.
+// It defaults to 1.
+func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery {
+	q.boostTerms = &boostTerms
+	return q
+}
+
+// Analyzer specifies the analyzer that will be used to analyze the text.
+// Defaults to the analyzer associated with the field.
+func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery {
+	q.boost = &boost
+	return q
+}
+
+// FailOnUnsupportedField indicates whether to fail or return no result
+// when this query is run against a field which is not supported such as
+// a binary/numeric field.
+func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery {
+	q.failOnUnsupportedField = &fail
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source creates the source for the MLT query.
+// It may return an error if the caller forgot to specify any documents to
+// be "liked" in the MoreLikeThisQuery.
+func (q *MoreLikeThisQuery) Source() (interface{}, error) {
+	// {
+	//   "mlt" : { ...
} + // } + if len(q.docs) == 0 { + return nil, errors.New(`more_like_this requires some documents to be "liked"`) + } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["mlt"] = params + + if len(q.fields) > 0 { + params["fields"] = q.fields + } + + likes := make([]interface{}, 0) + for _, doc := range q.docs { + src, err := doc.Source() + if err != nil { + return nil, err + } + likes = append(likes, src) + } + params["like"] = likes + + if len(q.unlikeDocs) > 0 { + dontLikes := make([]interface{}, 0) + for _, doc := range q.unlikeDocs { + src, err := doc.Source() + if err != nil { + return nil, err + } + dontLikes = append(dontLikes, src) + } + params["unlike"] = dontLikes + } + + if q.minimumShouldMatch != "" { + params["minimum_should_match"] = q.minimumShouldMatch + } + if q.minTermFreq != nil { + params["min_term_freq"] = *q.minTermFreq + } + if q.maxQueryTerms != nil { + params["max_query_terms"] = *q.maxQueryTerms + } + if len(q.stopWords) > 0 { + params["stop_words"] = q.stopWords + } + if q.minDocFreq != nil { + params["min_doc_freq"] = *q.minDocFreq + } + if q.maxDocFreq != nil { + params["max_doc_freq"] = *q.maxDocFreq + } + if q.minWordLen != nil { + params["min_word_len"] = *q.minWordLen + } + if q.maxWordLen != nil { + params["max_word_len"] = *q.maxWordLen + } + if q.boostTerms != nil { + params["boost_terms"] = *q.boostTerms + } + if q.boost != nil { + params["boost"] = *q.boost + } + if q.analyzer != "" { + params["analyzer"] = q.analyzer + } + if q.failOnUnsupportedField != nil { + params["fail_on_unsupported_field"] = *q.failOnUnsupportedField + } + if q.queryName != "" { + params["_name"] = q.queryName + } + if q.include != nil { + params["include"] = *q.include + } + + return source, nil +} + +// -- MoreLikeThisQueryItem -- + +// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery +// to be "liked" or "unliked". +type MoreLikeThisQueryItem struct { + likeText string + + index string + typ string + id string + doc interface{} + fields []string + routing string + fsc *FetchSourceContext + version int64 + versionType string +} + +// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem. +func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem { + return &MoreLikeThisQueryItem{ + version: -1, + } +} + +// LikeText represents a text to be "liked". +func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem { + item.likeText = likeText + return item +} + +// Index represents the index of the item. +func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem { + item.index = index + return item +} + +// Type represents the document type of the item. +func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem { + item.typ = typ + return item +} + +// Id represents the document id of the item. +func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem { + item.id = id + return item +} + +// Doc represents a raw document template for the item. +func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem { + item.doc = doc + return item +} + +// Fields represents the list of fields of the item. +func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem { + item.fields = append(item.fields, fields...) + return item +} + +// Routing sets the routing associated with the item. 
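+//
+// An item-based sketch; the index, type, id, and routing values follow
+// the tests included in this patch:
+//
+//   item := NewMoreLikeThisQueryItem().
+//       Index("elastic-test2").Type("comment").Id("2").Routing("routing_id")
+//   q := NewMoreLikeThisQuery().LikeItems(item)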
+func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem { + item.routing = routing + return item +} + +// FetchSourceContext represents the fetch source of the item which controls +// if and how _source should be returned. +func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem { + item.fsc = fsc + return item +} + +// Version specifies the version of the item. +func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem { + item.version = version + return item +} + +// VersionType represents the version type of the item. +func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem { + item.versionType = versionType + return item +} + +// Source returns the JSON-serializable fragment of the entity. +func (item *MoreLikeThisQueryItem) Source() (interface{}, error) { + if item.likeText != "" { + return item.likeText, nil + } + + source := make(map[string]interface{}) + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.id != "" { + source["_id"] = item.id + } + if item.doc != nil { + source["doc"] = item.doc + } + if len(item.fields) > 0 { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.version >= 0 { + source["_version"] = item.version + } + if item.versionType != "" { + source["_version_type"] = item.versionType + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this_test.go new file mode 100644 index 000000000..64bfe4305 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this_test.go @@ -0,0 +1,91 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMoreLikeThisQuerySourceWithLikeText(t *testing.T) { + q := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatal(err) + } + got := string(data) + expected := `{"mlt":{"fields":["message"],"like":["Golang topic"]}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMoreLikeThisQuerySourceWithLikeAndUnlikeItems(t *testing.T) { + q := NewMoreLikeThisQuery() + q = q.LikeItems( + NewMoreLikeThisQueryItem().Id("1"), + NewMoreLikeThisQueryItem().Index(testIndexName2).Type("comment").Id("2").Routing("routing_id"), + ) + q = q.IgnoreLikeItems(NewMoreLikeThisQueryItem().Id("3")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatal(err) + } + got := string(data) + expected := `{"mlt":{"like":[{"_id":"1"},{"_id":"2","_index":"elastic-test2","_routing":"routing_id","_type":"comment"}],"unlike":[{"_id":"3"}]}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMoreLikeThisQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another Golang topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Common query + mltq := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message") + res, err := client.Search(). + Index(testIndexName). + Query(mltq). + Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match.go new file mode 100644 index 000000000..b9f74a0d3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match.go @@ -0,0 +1,275 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strings" +) + +// MultiMatchQuery builds on the MatchQuery to allow multi-field queries. 
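+//
+// A minimal sketch, matching the tests below:
+//
+//   q := NewMultiMatchQuery("this is a test", "subject", "message")
+//   // marshals to {"multi_match":{"fields":["subject","message"],"query":"this is a test"}}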
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html
+type MultiMatchQuery struct {
+	text               interface{}
+	fields             []string
+	fieldBoosts        map[string]*float64
+	typ                string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix
+	operator           string // AND or OR
+	analyzer           string
+	boost              *float64
+	slop               *int
+	fuzziness          string
+	prefixLength       *int
+	maxExpansions      *int
+	minimumShouldMatch string
+	rewrite            string
+	fuzzyRewrite       string
+	tieBreaker         *float64
+	lenient            *bool
+	cutoffFrequency    *float64
+	zeroTermsQuery     string
+	queryName          string
+}
+
+// NewMultiMatchQuery creates and initializes a new MultiMatchQuery.
+func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery {
+	q := &MultiMatchQuery{
+		text:        text,
+		fields:      make([]string, 0),
+		fieldBoosts: make(map[string]*float64),
+	}
+	q.fields = append(q.fields, fields...)
+	return q
+}
+
+// Field adds a field to run the multi match against.
+func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery {
+	q.fields = append(q.fields, field)
+	return q
+}
+
+// FieldWithBoost adds a field to run the multi match against with a specific boost.
+func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery {
+	q.fields = append(q.fields, field)
+	q.fieldBoosts[field] = &boost
+	return q
+}
+
+// Type can be "best_fields", "boolean", "most_fields", "cross_fields",
+// "phrase", or "phrase_prefix".
+func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery {
+	var zero = float64(0.0)
+	var one = float64(1.0)
+
+	switch strings.ToLower(typ) {
+	default: // best_fields / boolean
+		q.typ = "best_fields"
+		q.tieBreaker = &zero
+	case "most_fields":
+		q.typ = "most_fields"
+		q.tieBreaker = &one
+	case "cross_fields":
+		q.typ = "cross_fields"
+		q.tieBreaker = &zero
+	case "phrase":
+		q.typ = "phrase"
+		q.tieBreaker = &zero
+	case "phrase_prefix":
+		q.typ = "phrase_prefix"
+		q.tieBreaker = &zero
+	}
+	return q
+}
+
+// Operator sets the operator to use when using boolean query.
+// It can be either AND or OR (default).
+func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery {
+	q.operator = operator
+	return q
+}
+
+// Analyzer sets the analyzer to use explicitly. It defaults to use explicit
+// mapping config for the field, or, if not set, the default search analyzer.
+func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery {
+	q.boost = &boost
+	return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery {
+	q.slop = &slop
+	return q
+}
+
+// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type.
+// It defaults to "AUTO".
+func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery {
+	q.fuzziness = fuzziness
+	return q
+}
+
+// PrefixLength for the fuzzy process.
+func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery {
+	q.prefixLength = &prefixLength
+	return q
+}
+
+// MaxExpansions is the number of term expansions to use when using fuzzy
+// or prefix type query. It defaults to unbounded so it's recommended
+// to set it to a reasonable value for faster execution.
+func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery { + q.maxExpansions = &maxExpansions + return q +} + +// MinimumShouldMatch represents the minimum number of optional should clauses +// to match. +func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery { + q.rewrite = rewrite + return q +} + +func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery { + q.fuzzyRewrite = fuzzyRewrite + return q +} + +// TieBreaker for "best-match" disjunction queries (OR queries). +// The tie breaker capability allows documents that match more than one +// query clause (in this case on more than one field) to be scored better +// than documents that match only the best of the fields, without confusing +// this with the better case of two distinct matches in the multiple fields. +// +// A tie-breaker value of 1.0 is interpreted as a signal to score queries as +// "most-match" queries where all matching query clauses are considered for scoring. +func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery { + q.tieBreaker = &tieBreaker + return q +} + +// Lenient indicates whether format based failures will be ignored. +func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery { + q.lenient = &lenient + return q +} + +// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1) +// representing the maximum threshold of a terms document frequency to be +// considered a low frequency term. +func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery { + q.cutoffFrequency = &cutoff + return q +} + +// ZeroTermsQuery can be "all" or "none". +func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery { + q.zeroTermsQuery = zeroTermsQuery + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. 
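+//
+// For example, a best_fields search with an explicit tie breaker
+// (the values follow the tests in this patch):
+//
+//   q := NewMultiMatchQuery("this is a test", "subject", "message").
+//       Type("best_fields").TieBreaker(0.3)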
+func (q *MultiMatchQuery) Source() (interface{}, error) { + // + // { + // "multi_match" : { + // "query" : "this is a test", + // "fields" : [ "subject", "message" ] + // } + // } + + source := make(map[string]interface{}) + + multiMatch := make(map[string]interface{}) + source["multi_match"] = multiMatch + + multiMatch["query"] = q.text + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + multiMatch["fields"] = fields + } + + if q.typ != "" { + multiMatch["type"] = q.typ + } + + if q.operator != "" { + multiMatch["operator"] = q.operator + } + if q.analyzer != "" { + multiMatch["analyzer"] = q.analyzer + } + if q.boost != nil { + multiMatch["boost"] = *q.boost + } + if q.slop != nil { + multiMatch["slop"] = *q.slop + } + if q.fuzziness != "" { + multiMatch["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + multiMatch["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + multiMatch["max_expansions"] = *q.maxExpansions + } + if q.minimumShouldMatch != "" { + multiMatch["minimum_should_match"] = q.minimumShouldMatch + } + if q.rewrite != "" { + multiMatch["rewrite"] = q.rewrite + } + if q.fuzzyRewrite != "" { + multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.tieBreaker != nil { + multiMatch["tie_breaker"] = *q.tieBreaker + } + if q.lenient != nil { + multiMatch["lenient"] = *q.lenient + } + if q.cutoffFrequency != nil { + multiMatch["cutoff_frequency"] = *q.cutoffFrequency + } + if q.zeroTermsQuery != "" { + multiMatch["zero_terms_query"] = q.zeroTermsQuery + } + if q.queryName != "" { + multiMatch["_name"] = q.queryName + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match_test.go new file mode 100644 index 000000000..508726bed --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match_test.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMultiMatchQuery(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryBestFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("best_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"best_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryMostFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("most_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":1,"type":"most_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryCrossFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("cross_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"cross_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryPhrase(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryPhrasePrefix(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase_prefix") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase_prefix"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryBestFieldsWithCustomTieBreaker(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message"). + Type("best_fields"). 
+ TieBreaker(0.3) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0.3,"type":"best_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested.go new file mode 100644 index 000000000..0a598f8bf --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedQuery allows to query nested objects / docs. +// The query is executed against the nested objects / docs as if they were +// indexed as separate docs (they are, internally) and resulting in the +// root parent doc (or parent nested mapping). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-nested-query.html +type NestedQuery struct { + query Query + path string + scoreMode string + boost *float64 + queryName string + innerHit *InnerHit +} + +// NewNestedQuery creates and initializes a new NestedQuery. +func NewNestedQuery(path string, query Query) *NestedQuery { + return &NestedQuery{path: path, query: query} +} + +// ScoreMode specifies the score mode. +func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery { + q.scoreMode = scoreMode + return q +} + +// Boost sets the boost for this query. +func (q *NestedQuery) Boost(boost float64) *NestedQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *NestedQuery) QueryName(queryName string) *NestedQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this nested query +// and reusing the defined path and query. +func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the query. +func (q *NestedQuery) Source() (interface{}, error) { + query := make(map[string]interface{}) + nq := make(map[string]interface{}) + query["nested"] = nq + + src, err := q.query.Source() + if err != nil { + return nil, err + } + nq["query"] = src + + nq["path"] = q.path + + if q.scoreMode != "" { + nq["score_mode"] = q.scoreMode + } + if q.boost != nil { + nq["boost"] = *q.boost + } + if q.queryName != "" { + nq["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + nq["inner_hits"] = src + } + return query, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested_test.go new file mode 100644 index 000000000..b068c59b1 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested_test.go @@ -0,0 +1,52 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestNestedQuery(t *testing.T) { + bq := NewBoolQuery() + bq = bq.Must(NewTermQuery("obj1.name", "blue")) + bq = bq.Must(NewRangeQuery("obj1.count").Gt(5)) + q := NewNestedQuery("obj1", bq).QueryName("qname") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"nested":{"_name":"qname","path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestNestedQueryWithInnerHit(t *testing.T) { + bq := NewBoolQuery() + bq = bq.Must(NewTermQuery("obj1.name", "blue")) + bq = bq.Must(NewRangeQuery("obj1.count").Gt(5)) + q := NewNestedQuery("obj1", bq) + q = q.QueryName("qname") + q = q.InnerHit(NewInnerHit().Name("comments").Query(NewTermQuery("user", "olivere"))) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"nested":{"_name":"qname","inner_hits":{"name":"comments","query":{"term":{"user":"olivere"}}},"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not.go new file mode 100644 index 000000000..7a1ee8e08 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NotQuery filters out matched documents using a query. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-not-query.html +type NotQuery struct { + filter Query + queryName string +} + +// NewNotQuery creates and initializes a new NotQuery. +func NewNotQuery(filter Query) *NotQuery { + return &NotQuery{ + filter: filter, + } +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *NotQuery) QueryName(queryName string) *NotQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. 
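+//
+// For example, matching the test below:
+//
+//   q := NewNotQuery(NewTermQuery("user", "olivere"))
+//   // marshals to {"not":{"query":{"term":{"user":"olivere"}}}}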
+func (q *NotQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["not"] = params + + src, err := q.filter.Source() + if err != nil { + return nil, err + } + params["query"] = src + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not_test.go new file mode 100644 index 000000000..4c4f1c0ab --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestNotQuery(t *testing.T) { + f := NewNotQuery(NewTermQuery("user", "olivere")) + src, err := f.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"not":{"query":{"term":{"user":"olivere"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestNotQueryWithParams(t *testing.T) { + postDateFilter := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01") + f := NewNotQuery(postDateFilter) + f = f.QueryName("MyQueryName") + src, err := f.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"not":{"_name":"MyQueryName","query":{"range":{"postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix.go new file mode 100644 index 000000000..1628ba8cc --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix.go @@ -0,0 +1,67 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PrefixQuery matches documents that have fields containing terms +// with a specified prefix (not analyzed). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-prefix-query.html +type PrefixQuery struct { + name string + prefix string + boost *float64 + rewrite string + queryName string +} + +// NewPrefixQuery creates and initializes a new PrefixQuery. +func NewPrefixQuery(name string, prefix string) *PrefixQuery { + return &PrefixQuery{name: name, prefix: prefix} +} + +// Boost sets the boost for this query. +func (q *PrefixQuery) Boost(boost float64) *PrefixQuery { + q.boost = &boost + return q +} + +func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. 
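+//
+// Note that setting a boost, rewrite, or query name switches Source to
+// an expanded object form, as the tests below show:
+//
+//   NewPrefixQuery("user", "ki")
+//   // marshals to {"prefix":{"user":"ki"}}
+//   NewPrefixQuery("user", "ki").QueryName("my_query_name")
+//   // marshals to {"prefix":{"user":{"_name":"my_query_name","prefix":"ki"}}}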
+func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *PrefixQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["prefix"] = query + + if q.boost == nil && q.rewrite == "" && q.queryName == "" { + query[q.name] = q.prefix + } else { + subQuery := make(map[string]interface{}) + subQuery["prefix"] = q.prefix + if q.boost != nil { + subQuery["boost"] = *q.boost + } + if q.rewrite != "" { + subQuery["rewrite"] = q.rewrite + } + if q.queryName != "" { + subQuery["_name"] = q.queryName + } + query[q.name] = subQuery + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix_test.go new file mode 100644 index 000000000..ce1b74e41 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix_test.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPrefixQuery(t *testing.T) { + q := NewPrefixQuery("user", "ki") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"prefix":{"user":"ki"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPrefixQueryWithOptions(t *testing.T) { + q := NewPrefixQuery("user", "ki") + q = q.QueryName("my_query_name") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"prefix":{"user":{"_name":"my_query_name","prefix":"ki"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string.go new file mode 100644 index 000000000..53e4f344f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string.go @@ -0,0 +1,349 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// QueryStringQuery uses the query parser in order to parse its content. 
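+//
+// A short sketch (the query uses standard Lucene query syntax; the
+// field names and values are illustrative):
+//
+//   q := NewQueryStringQuery(`message:"quick brown" AND user:olivere`)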
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html
+type QueryStringQuery struct {
+	queryString               string
+	defaultField              string
+	defaultOperator           string
+	analyzer                  string
+	quoteAnalyzer             string
+	quoteFieldSuffix          string
+	autoGeneratePhraseQueries *bool
+	allowLeadingWildcard      *bool
+	lowercaseExpandedTerms    *bool
+	enablePositionIncrements  *bool
+	analyzeWildcard           *bool
+	locale                    string
+	boost                     *float64
+	fuzziness                 string
+	fuzzyPrefixLength         *int
+	fuzzyMaxExpansions        *int
+	fuzzyRewrite              string
+	phraseSlop                *int
+	fields                    []string
+	fieldBoosts               map[string]*float64
+	useDisMax                 *bool
+	tieBreaker                *float64
+	rewrite                   string
+	minimumShouldMatch        string
+	lenient                   *bool
+	queryName                 string
+	timeZone                  string
+	maxDeterminizedStates     *int
+}
+
+// NewQueryStringQuery creates and initializes a new QueryStringQuery.
+func NewQueryStringQuery(queryString string) *QueryStringQuery {
+	return &QueryStringQuery{
+		queryString: queryString,
+		fields:      make([]string, 0),
+		fieldBoosts: make(map[string]*float64),
+	}
+}
+
+// DefaultField specifies the field to run against when no prefix field
+// is specified. Only relevant when no fields are explicitly added for
+// the query string to run against.
+func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery {
+	q.defaultField = defaultField
+	return q
+}
+
+// Field adds a field to run the query string against.
+func (q *QueryStringQuery) Field(field string) *QueryStringQuery {
+	q.fields = append(q.fields, field)
+	return q
+}
+
+// FieldWithBoost adds a field to run the query string against with a specific boost.
+func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery {
+	q.fields = append(q.fields, field)
+	q.fieldBoosts[field] = &boost
+	return q
+}
+
+// UseDisMax specifies whether to combine queries using dis max or boolean
+// query when more than one field is used with the query string. Defaults
+// to dismax (true).
+func (q *QueryStringQuery) UseDisMax(useDisMax bool) *QueryStringQuery {
+	q.useDisMax = &useDisMax
+	return q
+}
+
+// TieBreaker is used when more than one field is used with the query string,
+// and combined queries are using dismax.
+func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery {
+	q.tieBreaker = &tieBreaker
+	return q
+}
+
+// DefaultOperator sets the boolean operator of the query parser used to
+// parse the query string.
+//
+// In default mode (OR) terms without any modifiers
+// are considered optional, e.g. "capital of Hungary" is equal to
+// "capital OR of OR Hungary".
+//
+// In AND mode, terms are considered to be in conjunction. The above mentioned
+// query is then parsed as "capital AND of AND Hungary".
+func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery {
+	q.defaultOperator = operator
+	return q
+}
+
+// Analyzer is an optional analyzer used to analyze the query string.
+// Note, if a field has search analyzer defined for it, then it will be used
+// automatically. Defaults to the smart search analyzer.
+func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// QuoteAnalyzer is an optional analyzer to be used to analyze the query string
+// for phrase searches. Note, if a field has search analyzer defined for it,
+// then it will be used automatically. Defaults to the smart search analyzer.
+func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery {
+	q.quoteAnalyzer = quoteAnalyzer
+	return q
+}
+
+// AutoGeneratePhraseQueries indicates whether or not phrase queries will
+// be automatically generated when the analyzer returns more than one term
+// from whitespace delimited text. Set to false if phrase queries should only
+// be generated when surrounded by double quotes.
+func (q *QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) *QueryStringQuery {
+	q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries
+	return q
+}
+
+// MaxDeterminizedState protects against too-difficult regular expression queries.
+func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery {
+	q.maxDeterminizedStates = &maxDeterminizedStates
+	return q
+}
+
+// AllowLeadingWildcard specifies whether leading wildcards should be allowed
+// or not (defaults to true).
+func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery {
+	q.allowLeadingWildcard = &allowLeadingWildcard
+	return q
+}
+
+// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
+// and range queries are automatically lower-cased or not. Default is true.
+func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery {
+	q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+	return q
+}
+
+// EnablePositionIncrements indicates whether to enable position increments
+// in result query. Defaults to true.
+//
+// When set, result phrase and multi-phrase queries will be aware of position
+// increments. Useful when e.g. a StopFilter increases the position increment
+// of the token that follows an omitted token.
+func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery {
+	q.enablePositionIncrements = &enablePositionIncrements
+	return q
+}
+
+// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO".
+func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery {
+	q.fuzziness = fuzziness
+	return q
+}
+
+// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries.
+// Default is 1.
+func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery {
+	q.fuzzyPrefixLength = &fuzzyPrefixLength
+	return q
+}
+
+// FuzzyMaxExpansions sets the maximum number of term expansions for fuzzy queries.
+func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery {
+	q.fuzzyMaxExpansions = &fuzzyMaxExpansions
+	return q
+}
+
+// FuzzyRewrite sets the rewrite method used for fuzzy queries.
+func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery {
+	q.fuzzyRewrite = fuzzyRewrite
+	return q
+}
+
+// PhraseSlop sets the default slop for phrases. If zero, then exact matches
+// are required. Default value is zero.
+func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery {
+	q.phraseSlop = &phraseSlop
+	return q
+}
+
+// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
+func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery {
+	q.analyzeWildcard = &analyzeWildcard
+	return q
+}
+
+// Rewrite sets the rewrite method to use when rewriting multi-term queries.
+func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery {
+	q.rewrite = rewrite
+	return q
+}
+
+// MinimumShouldMatch sets the minimum number (or percentage) of optional
+// clauses that must match.
+func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// Boost sets the boost for this query.
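+//
+// Editor's sketch (not part of the upstream source): the options above
+// chain fluently, for example
+//
+//	q := NewQueryStringQuery("golang AND elastic").
+//		Field("title").
+//		DefaultOperator("AND").
+//		Boost(2.0)
+//
+// Each option set this way is serialized into the "query_string" object
+// by Source below.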
+func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery { + q.boost = &boost + return q +} + +// QuoteFieldSuffix is an optional field name suffix to automatically +// try and add to the field searched when using quoted text. +func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery { + q.quoteFieldSuffix = quoteFieldSuffix + return q +} + +// Lenient indicates whether the query string parser should be lenient +// when parsing field values. It defaults to the index setting and if not +// set, defaults to false. +func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery { + q.lenient = &lenient + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery { + q.queryName = queryName + return q +} + +func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery { + q.locale = locale + return q +} + +// TimeZone can be used to automatically adjust to/from fields using a +// timezone. Only used with date fields, of course. +func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery { + q.timeZone = timeZone + return q +} + +// Source returns JSON for the query. +func (q *QueryStringQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["query_string"] = query + + query["query"] = q.queryString + + if q.defaultField != "" { + query["default_field"] = q.defaultField + } + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.tieBreaker != nil { + query["tie_breaker"] = *q.tieBreaker + } + if q.useDisMax != nil { + query["use_dis_max"] = *q.useDisMax + } + if q.defaultOperator != "" { + query["default_operator"] = q.defaultOperator + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.quoteAnalyzer != "" { + query["quote_analyzer"] = q.quoteAnalyzer + } + if q.autoGeneratePhraseQueries != nil { + query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries + } + if q.maxDeterminizedStates != nil { + query["max_determinized_states"] = *q.maxDeterminizedStates + } + if q.allowLeadingWildcard != nil { + query["allow_leading_wildcard"] = *q.allowLeadingWildcard + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.enablePositionIncrements != nil { + query["enable_position_increments"] = *q.enablePositionIncrements + } + if q.fuzziness != "" { + query["fuzziness"] = q.fuzziness + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.fuzzyPrefixLength != nil { + query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength + } + if q.fuzzyMaxExpansions != nil { + query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions + } + if q.fuzzyRewrite != "" { + query["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.phraseSlop != nil { + query["phrase_slop"] = *q.phraseSlop + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.rewrite != "" { + query["rewrite"] = q.rewrite + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.quoteFieldSuffix != "" { + 
query["quote_field_suffix"] = q.quoteFieldSuffix + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.timeZone != "" { + query["time_zone"] = q.timeZone + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string_test.go new file mode 100644 index 000000000..4d766124a --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string_test.go @@ -0,0 +1,28 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestQueryStringQuery(t *testing.T) { + q := NewQueryStringQuery(`this AND that OR thus`) + q = q.DefaultField("content") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query_string":{"default_field":"content","query":"this AND that OR thus"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range.go new file mode 100644 index 000000000..f688c25bd --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range.go @@ -0,0 +1,145 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RangeQuery matches documents with fields that have terms within a certain range. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html +type RangeQuery struct { + name string + from interface{} + to interface{} + timeZone string + includeLower bool + includeUpper bool + boost *float64 + queryName string + format string +} + +// NewRangeQuery creates and initializes a new RangeQuery. +func NewRangeQuery(name string) *RangeQuery { + return &RangeQuery{name: name, includeLower: true, includeUpper: true} +} + +// From indicates the from part of the RangeQuery. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) From(from interface{}) *RangeQuery { + q.from = from + return q +} + +// Gt indicates a greater-than value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gt(from interface{}) *RangeQuery { + q.from = from + q.includeLower = false + return q +} + +// Gte indicates a greater-than-or-equal value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gte(from interface{}) *RangeQuery { + q.from = from + q.includeLower = true + return q +} + +// To indicates the to part of the RangeQuery. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) To(to interface{}) *RangeQuery { + q.to = to + return q +} + +// Lt indicates a less-than value for the to part. +// Use nil to indicate an unbounded to part. 
+func (q *RangeQuery) Lt(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = false + return q +} + +// Lte indicates a less-than-or-equal value for the to part. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) Lte(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = true + return q +} + +// IncludeLower indicates whether the lower bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery { + q.includeLower = includeLower + return q +} + +// IncludeUpper indicates whether the upper bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery { + q.includeUpper = includeUpper + return q +} + +// Boost sets the boost for this query. +func (q *RangeQuery) Boost(boost float64) *RangeQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *RangeQuery) QueryName(queryName string) *RangeQuery { + q.queryName = queryName + return q +} + +// TimeZone is used for date fields. In that case, we can adjust the +// from/to fields using a timezone. +func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery { + q.timeZone = timeZone + return q +} + +// Format is used for date fields. In that case, we can set the format +// to be used instead of the mapper format. +func (q *RangeQuery) Format(format string) *RangeQuery { + q.format = format + return q +} + +// Source returns JSON for the query. +func (q *RangeQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + rangeQ := make(map[string]interface{}) + source["range"] = rangeQ + + params := make(map[string]interface{}) + rangeQ[q.name] = params + + params["from"] = q.from + params["to"] = q.to + if q.timeZone != "" { + params["time_zone"] = q.timeZone + } + if q.format != "" { + params["format"] = q.format + } + params["include_lower"] = q.includeLower + params["include_upper"] = q.includeUpper + + if q.boost != nil { + rangeQ["boost"] = *q.boost + } + + if q.queryName != "" { + rangeQ["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range_test.go new file mode 100644 index 000000000..126bb16f2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestRangeQuery(t *testing.T) { + q := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01") + q = q.QueryName("my_query") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"_name":"my_query","postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeQueryWithTimeZone(t *testing.T) { + q := NewRangeQuery("born"). + Gte("2012-01-01"). + Lte("now"). 
+ TimeZone("+1:00") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":true,"time_zone":"+1:00","to":"now"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeQueryWithFormat(t *testing.T) { + q := NewRangeQuery("born"). + Gte("2012/01/01"). + Lte("now"). + Format("yyyy/MM/dd") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"born":{"format":"yyyy/MM/dd","from":"2012/01/01","include_lower":true,"include_upper":true,"to":"now"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp.go new file mode 100644 index 000000000..ecd9f7fe0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp.go @@ -0,0 +1,82 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RegexpQuery allows you to use regular expression term queries. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html +type RegexpQuery struct { + name string + regexp string + flags string + boost *float64 + rewrite string + queryName string + maxDeterminizedStates *int +} + +// NewRegexpQuery creates and initializes a new RegexpQuery. +func NewRegexpQuery(name string, regexp string) *RegexpQuery { + return &RegexpQuery{name: name, regexp: regexp} +} + +// Flags sets the regexp flags. +func (q *RegexpQuery) Flags(flags string) *RegexpQuery { + q.flags = flags + return q +} + +// MaxDeterminizedStates protects against complex regular expressions. +func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery { + q.maxDeterminizedStates = &maxDeterminizedStates + return q +} + +// Boost sets the boost for this query. +func (q *RegexpQuery) Boost(boost float64) *RegexpQuery { + q.boost = &boost + return q +} + +func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON-serializable query data. 
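+//
+// For example, NewRegexpQuery("name.first", "s.*y") serializes to
+//
+//	{"regexp":{"name.first":{"value":"s.*y"}}}
+//
+// (editor's note; this mirrors the test below).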
+func (q *RegexpQuery) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	query := make(map[string]interface{})
+	source["regexp"] = query
+
+	x := make(map[string]interface{})
+	x["value"] = q.regexp
+	if q.flags != "" {
+		x["flags"] = q.flags
+	}
+	if q.maxDeterminizedStates != nil {
+		x["max_determinized_states"] = *q.maxDeterminizedStates
+	}
+	if q.boost != nil {
+		x["boost"] = *q.boost
+	}
+	if q.rewrite != "" {
+		x["rewrite"] = q.rewrite
+	}
+	if q.queryName != "" {
+		x["name"] = q.queryName
+	}
+	query[q.name] = x
+
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp_test.go
new file mode 100644
index 000000000..f4dc2355b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestRegexpQuery(t *testing.T) {
+	q := NewRegexpQuery("name.first", "s.*y")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"regexp":{"name.first":{"value":"s.*y"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestRegexpQueryWithOptions(t *testing.T) {
+	q := NewRegexpQuery("name.first", "s.*y").
+		Boost(1.2).
+		Flags("INTERSECTION|COMPLEMENT|EMPTY").
+		QueryName("my_query_name")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"regexp":{"name.first":{"boost":1.2,"flags":"INTERSECTION|COMPLEMENT|EMPTY","name":"my_query_name","value":"s.*y"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script.go
new file mode 100644
index 000000000..3baa90574
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script.go
@@ -0,0 +1,51 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// ScriptQuery allows you to define scripts as filters.
+//
+// For details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html
+type ScriptQuery struct {
+	script    *Script
+	queryName string
+}
+
+// NewScriptQuery creates and initializes a new ScriptQuery.
+func NewScriptQuery(script *Script) *ScriptQuery {
+	return &ScriptQuery{
+		script: script,
+	}
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns JSON for the query.
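+//
+// Editor's note: the emitted shape is {"script":{"script":...}}, plus an
+// "_name" entry when QueryName was set; the inner value is produced by the
+// wrapped Script's own Source().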
+func (q *ScriptQuery) Source() (interface{}, error) {
+	if q.script == nil {
+		return nil, errors.New("ScriptQuery expected a script")
+	}
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["script"] = params
+
+	src, err := q.script.Source()
+	if err != nil {
+		return nil, err
+	}
+	params["script"] = src
+
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script_test.go
new file mode 100644
index 000000000..e10510c10
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script_test.go
@@ -0,0 +1,45 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestScriptQuery(t *testing.T) {
+	q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"script":{"script":"doc['num1'].value \u003e 1"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestScriptQueryWithParams(t *testing.T) {
+	q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
+	q = q.QueryName("MyQueryName")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"script":{"_name":"MyQueryName","script":"doc['num1'].value \u003e 1"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string.go
new file mode 100644
index 000000000..fb0a2a9b9
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string.go
@@ -0,0 +1,185 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"strings"
+)
+
+// SimpleQueryStringQuery is a query that uses the SimpleQueryParser
+// to parse its context. Unlike the regular query_string query,
+// the simple_query_string query will never throw an exception,
+// and discards invalid parts of the query.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html
+type SimpleQueryStringQuery struct {
+	queryText              string
+	analyzer               string
+	operator               string
+	fields                 []string
+	fieldBoosts            map[string]*float64
+	minimumShouldMatch     string
+	flags                  string
+	boost                  *float64
+	lowercaseExpandedTerms *bool
+	lenient                *bool
+	analyzeWildcard        *bool
+	locale                 string
+	queryName              string
+}
+
+// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery.
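+//
+// Editor's sketch (not part of the upstream source):
+//
+//	q := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`).
+//		Field("body").
+//		DefaultOperator("and")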
+func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery {
+	return &SimpleQueryStringQuery{
+		queryText:   text,
+		fields:      make([]string, 0),
+		fieldBoosts: make(map[string]*float64),
+	}
+}
+
+// Field adds a field to run the query against.
+func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery {
+	q.fields = append(q.fields, field)
+	return q
+}
+
+// FieldWithBoost adds a field to run the query against with a specific boost.
+func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery {
+	q.fields = append(q.fields, field)
+	q.fieldBoosts[field] = &boost
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery {
+	q.boost = &boost
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Analyzer specifies the analyzer to use for the query.
+func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// DefaultOperator specifies the default operator for the query.
+func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery {
+	q.operator = defaultOperator
+	return q
+}
+
+// Flags sets the flags for the query.
+func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery {
+	q.flags = flags
+	return q
+}
+
+// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
+// and range queries are automatically lower-cased or not. Default is true.
+func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery {
+	q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+	return q
+}
+
+// Locale sets the locale to use for parsing.
+func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery {
+	q.locale = locale
+	return q
+}
+
+// Lenient indicates whether the query string parser should be lenient
+// when parsing field values. It defaults to the index setting and if not
+// set, defaults to false.
+func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery {
+	q.lenient = &lenient
+	return q
+}
+
+// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
+func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery {
+	q.analyzeWildcard = &analyzeWildcard
+	return q
+}
+
+// MinimumShouldMatch sets the minimum number (or percentage) of clauses
+// that must match.
+func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// Source returns JSON for the query.
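+//
+// Editor's note: fields added via FieldWithBoost are rendered as "name^boost"
+// using fmt's %f verb, so FieldWithBoost("body", 5) yields "body^5.000000".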
+func (q *SimpleQueryStringQuery) Source() (interface{}, error) { + // { + // "simple_query_string" : { + // "query" : "\"fried eggs\" +(eggplant | potato) -frittata", + // "analyzer" : "snowball", + // "fields" : ["body^5","_all"], + // "default_operator" : "and" + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["simple_query_string"] = query + + query["query"] = q.queryText + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.flags != "" { + query["flags"] = q.flags + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.operator != "" { + query["default_operator"] = strings.ToLower(q.operator) + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.boost != nil { + query["boost"] = *q.boost + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string_test.go new file mode 100644 index 000000000..f6be3e5bd --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string_test.go @@ -0,0 +1,86 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestSimpleQueryStringQuery(t *testing.T) {
+	q := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"simple_query_string":{"query":"\"fried eggs\" +(eggplant | potato) -frittata"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSimpleQueryStringQueryExec(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Both terms are required, so only the first tweet should match
+	query := NewSimpleQueryStringQuery("+Golang +Elasticsearch")
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Query(query).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 1 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 1 {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+	}
+
+	for _, hit := range searchResult.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		item := make(map[string]interface{})
+		err := json.Unmarshal(*hit.Source, &item)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query.go
new file mode 100644
index 000000000..0611c3ea4
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query.go
@@ -0,0 +1,84 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TemplateQuery is a query that accepts a query template and a
+// map of key/value pairs to fill in template parameters.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
+type TemplateQuery struct {
+	template     string
+	templateType string
+	vars         map[string]interface{}
+}
+
+// NewTemplateQuery creates and initializes a new TemplateQuery.
+func NewTemplateQuery(name string) *TemplateQuery {
+	return &TemplateQuery{
+		template: name,
+		vars:     make(map[string]interface{}),
+	}
+}
+
+// Template specifies the name of the template.
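+//
+// Editor's sketch (not part of the upstream source): a template stored in the
+// cluster can be referenced by id,
+//
+//	q := NewTemplateQuery("indexedTemplate").
+//		TemplateType("id").
+//		Var("template", "all")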
+func (q *TemplateQuery) Template(name string) *TemplateQuery { + q.template = name + return q +} + +// TemplateType defines which kind of query we use. The values can be: +// inline, indexed, or file. If undefined, inline is used. +func (q *TemplateQuery) TemplateType(typ string) *TemplateQuery { + q.templateType = typ + return q +} + +// Var sets a single parameter pair. +func (q *TemplateQuery) Var(name string, value interface{}) *TemplateQuery { + q.vars[name] = value + return q +} + +// Vars sets parameters for the template query. +func (q *TemplateQuery) Vars(vars map[string]interface{}) *TemplateQuery { + q.vars = vars + return q +} + +// Source returns the JSON serializable content for the search. +func (q *TemplateQuery) Source() (interface{}, error) { + // { + // "template" : { + // "query" : {"match_{{template}}": {}}, + // "params" : { + // "template": "all" + // } + // } + // } + + query := make(map[string]interface{}) + + tmpl := make(map[string]interface{}) + query["template"] = tmpl + + // TODO(oe): Implementation differs from online documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html + var fieldname string + switch q.templateType { + case "file": // file + fieldname = "file" + case "indexed", "id": // indexed + fieldname = "id" + default: // inline + fieldname = "query" + } + + tmpl[fieldname] = q.template + if len(q.vars) > 0 { + tmpl["params"] = q.vars + } + + return query, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query_test.go new file mode 100644 index 000000000..8f21ef9f0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query_test.go @@ -0,0 +1,65 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTemplateQueryInlineTest(t *testing.T) { + q := NewTemplateQuery("\"match_{{template}}\": {}}\"").Vars(map[string]interface{}{"template": "all"}) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"template":{"params":{"template":"all"},"query":"\"match_{{template}}\": {}}\""}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTemplateQueryIndexedTest(t *testing.T) { + q := NewTemplateQuery("indexedTemplate"). + TemplateType("id"). + Vars(map[string]interface{}{"template": "all"}) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"template":{"id":"indexedTemplate","params":{"template":"all"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTemplateQueryFileTest(t *testing.T) { + q := NewTemplateQuery("storedTemplate"). + TemplateType("file"). 
+ Vars(map[string]interface{}{"template": "all"}) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"template":{"file":"storedTemplate","params":{"template":"all"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term.go new file mode 100644 index 000000000..c20c5c66e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term.go @@ -0,0 +1,58 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermQuery finds documents that contain the exact term specified +// in the inverted index. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html +type TermQuery struct { + name string + value interface{} + boost *float64 + queryName string +} + +// NewTermQuery creates and initializes a new TermQuery. +func NewTermQuery(name string, value interface{}) *TermQuery { + return &TermQuery{name: name, value: value} +} + +// Boost sets the boost for this query. +func (q *TermQuery) Boost(boost float64) *TermQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *TermQuery) QueryName(queryName string) *TermQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *TermQuery) Source() (interface{}, error) { + // {"term":{"name":"value"}} + source := make(map[string]interface{}) + tq := make(map[string]interface{}) + source["term"] = tq + + if q.boost == nil && q.queryName == "" { + tq[q.name] = q.value + } else { + subQ := make(map[string]interface{}) + subQ["value"] = q.value + if q.boost != nil { + subQ["boost"] = *q.boost + } + if q.queryName != "" { + subQ["_name"] = q.queryName + } + tq[q.name] = subQ + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term_test.go new file mode 100644 index 000000000..17c8c9848 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestTermQuery(t *testing.T) {
+	q := NewTermQuery("user", "ki")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"term":{"user":"ki"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestTermQueryWithOptions(t *testing.T) {
+	q := NewTermQuery("user", "ki")
+	q = q.Boost(2.79)
+	q = q.QueryName("my_tq")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"term":{"user":{"_name":"my_tq","boost":2.79,"value":"ki"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms.go
new file mode 100644
index 000000000..a7e158859
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms.go
@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermsQuery filters documents that have fields that match any
+// of the provided terms (not analyzed).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html
+type TermsQuery struct {
+	name      string
+	values    []interface{}
+	queryName string
+	boost     *float64
+}
+
+// NewTermsQuery creates and initializes a new TermsQuery.
+func NewTermsQuery(name string, values ...interface{}) *TermsQuery {
+	q := &TermsQuery{
+		name:   name,
+		values: make([]interface{}, 0),
+	}
+	if len(values) > 0 {
+		q.values = append(q.values, values...)
+	}
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *TermsQuery) Boost(boost float64) *TermsQuery {
+	q.boost = &boost
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *TermsQuery) QueryName(queryName string) *TermsQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns JSON for the query.
+func (q *TermsQuery) Source() (interface{}, error) {
+	// {"terms":{"name":["value1","value2"]}}
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["terms"] = params
+	params[q.name] = q.values
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms_test.go
new file mode 100644
index 000000000..6de743d14
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
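+//
+// Editor's note (sketch): NewTermsQuery("user", "ki", "ko") serializes to
+// {"terms":{"user":["ki","ko"]}}, whereas NewTermQuery takes exactly one
+// value; the tests below exercise both the bare and the optioned forms.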
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestTermsQuery(t *testing.T) {
+	q := NewTermsQuery("user", "ki")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"terms":{"user":["ki"]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestTermsQueryWithOptions(t *testing.T) {
+	q := NewTermsQuery("user", "ki", "ko")
+	q = q.Boost(2.79)
+	q = q.QueryName("my_tq")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"terms":{"_name":"my_tq","boost":2.79,"user":["ki","ko"]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type.go
new file mode 100644
index 000000000..884d4ae7b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type.go
@@ -0,0 +1,26 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TypeQuery filters documents matching the provided document / mapping type.
+//
+// For details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html
+type TypeQuery struct {
+	typ string
+}
+
+// NewTypeQuery creates and initializes a new TypeQuery.
+func NewTypeQuery(typ string) *TypeQuery {
+	return &TypeQuery{typ: typ}
+}
+
+// Source returns JSON for the query.
+func (q *TypeQuery) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["type"] = params
+	params["value"] = q.typ
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type_test.go
new file mode 100644
index 000000000..bde0ed3d3
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestTypeQuery(t *testing.T) {
+	q := NewTypeQuery("my_type")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"type":{"value":"my_type"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard.go
new file mode 100644
index 000000000..127332da3
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard.go
@@ -0,0 +1,81 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// WildcardQuery matches documents that have fields matching a wildcard +// expression (not analyzed). Supported wildcards are *, which matches +// any character sequence (including the empty one), and ?, which matches +// any single character. Note this query can be slow, as it needs to iterate +// over many terms. In order to prevent extremely slow wildcard queries, +// a wildcard term should not start with one of the wildcards * or ?. +// The wildcard query maps to Lucene WildcardQuery. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html +type WildcardQuery struct { + name string + wildcard string + boost *float64 + rewrite string + queryName string +} + +// NewWildcardQuery creates and initializes a new WildcardQuery. +func NewWildcardQuery(name, wildcard string) *WildcardQuery { + return &WildcardQuery{ + name: name, + wildcard: wildcard, + } +} + +// Boost sets the boost for this query. +func (q *WildcardQuery) Boost(boost float64) *WildcardQuery { + q.boost = &boost + return q +} + +func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the name of this query. +func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable body of this query. +func (q *WildcardQuery) Source() (interface{}, error) { + // { + // "wildcard" : { + // "user" : { + // "wildcard" : "ki*y", + // "boost" : 1.0 + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["wildcard"] = query + + wq := make(map[string]interface{}) + query[q.name] = wq + + wq["wildcard"] = q.wildcard + + if q.boost != nil { + wq["boost"] = *q.boost + } + if q.rewrite != "" { + wq["rewrite"] = q.rewrite + } + if q.queryName != "" { + wq["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard_test.go new file mode 100644 index 000000000..5cd529aff --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard_test.go @@ -0,0 +1,67 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic_test + +import ( + "encoding/json" + "testing" + + "gopkg.in/olivere/elastic.v3" +) + +func ExampleWildcardQuery() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + + // Define wildcard query + q := elastic.NewWildcardQuery("user", "oli*er?").Boost(1.2) + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(q). 
// use wildcard query defined above
+		Do()          // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	_ = searchResult
+}
+
+func TestWildcardQuery(t *testing.T) {
+	q := elastic.NewWildcardQuery("user", "ki*y??")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"wildcard":{"user":{"wildcard":"ki*y??"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestWildcardQueryWithBoost(t *testing.T) {
+	q := elastic.NewWildcardQuery("user", "ki*y??").Boost(1.2)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"wildcard":{"user":{"boost":1.2,"wildcard":"ki*y??"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_request.go
new file mode 100644
index 000000000..5fb476dd1
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_request.go
@@ -0,0 +1,153 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"strings"
+)
+
+// SearchRequest combines a search request and its
+// query details (see SearchSource).
+// It is used in combination with MultiSearch.
+type SearchRequest struct {
+	searchType string // default in ES is "query_then_fetch"
+	indices    []string
+	types      []string
+	routing    *string
+	preference *string
+	source     interface{}
+}
+
+// NewSearchRequest creates a new search request.
+func NewSearchRequest() *SearchRequest {
+	return &SearchRequest{
+		indices: make([]string, 0),
+		types:   make([]string, 0),
+	}
+}
+
+// SearchType must be one of "query_then_fetch", "query_and_fetch",
+// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch".
+// Use one of the convenience methods below to set it by name.
+func (r *SearchRequest) SearchType(searchType string) *SearchRequest {
+	r.searchType = searchType
+	return r
+}
+
+// SearchTypeDfsQueryThenFetch sets the search type to "dfs_query_then_fetch".
+func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest {
+	return r.SearchType("dfs_query_then_fetch")
+}
+
+// SearchTypeDfsQueryAndFetch sets the search type to "dfs_query_and_fetch".
+func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest {
+	return r.SearchType("dfs_query_and_fetch")
+}
+
+// SearchTypeQueryThenFetch sets the search type to "query_then_fetch".
+func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest {
+	return r.SearchType("query_then_fetch")
+}
+
+// SearchTypeQueryAndFetch sets the search type to "query_and_fetch".
+func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest {
+	return r.SearchType("query_and_fetch")
+}
+
+// SearchTypeScan sets the search type to "scan".
+func (r *SearchRequest) SearchTypeScan() *SearchRequest {
+	return r.SearchType("scan")
+}
+
+// SearchTypeCount sets the search type to "count".
+func (r *SearchRequest) SearchTypeCount() *SearchRequest {
+	return r.SearchType("count")
+}
+
+// Index adds one or more indices to run the search against.
+func (r *SearchRequest) Index(indices ...string) *SearchRequest {
+	r.indices = append(r.indices, indices...)
+	return r
+}
+
+// HasIndices reports whether at least one index was added.
+func (r *SearchRequest) HasIndices() bool {
+	return len(r.indices) > 0
+}
+
+// Type adds one or more document types to run the search against.
+func (r *SearchRequest) Type(types ...string) *SearchRequest {
+	r.types = append(r.types, types...)
+	return r
+}
+
+// Routing sets the routing value for the request.
+func (r *SearchRequest) Routing(routing string) *SearchRequest {
+	r.routing = &routing
+	return r
+}
+
+// Routings sets several routing values, joined by a comma.
+func (r *SearchRequest) Routings(routings ...string) *SearchRequest {
+	if routings != nil {
+		routings := strings.Join(routings, ",")
+		r.routing = &routings
+	} else {
+		r.routing = nil
+	}
+	return r
+}
+
+// Preference sets the preference for the request.
+func (r *SearchRequest) Preference(preference string) *SearchRequest {
+	r.preference = &preference
+	return r
+}
+
+// Source sets the search body, either from a SearchSource or from any
+// JSON-serializable value.
+func (r *SearchRequest) Source(source interface{}) *SearchRequest {
+	switch v := source.(type) {
+	case *SearchSource:
+		src, err := v.Source()
+		if err != nil {
+			// Do not do anything in case of an error
+			return r
+		}
+		r.source = src
+	default:
+		r.source = source
+	}
+	return r
+}
+
+// header is used by MultiSearch to get information about the search header
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) header() interface{} {
+	h := make(map[string]interface{})
+	if r.searchType != "" {
+		h["search_type"] = r.searchType
+	}
+
+	switch len(r.indices) {
+	case 0:
+	case 1:
+		h["index"] = r.indices[0]
+	default:
+		h["indices"] = r.indices
+	}
+
+	switch len(r.types) {
+	case 0:
+	case 1:
+		h["type"] = r.types[0]
+	default:
+		h["types"] = r.types
+	}
+
+	if r.routing != nil && *r.routing != "" {
+		h["routing"] = *r.routing
+	}
+
+	if r.preference != nil && *r.preference != "" {
+		h["preference"] = *r.preference
+	}
+
+	return h
+}
+
+// body is used by MultiSearch to get information about the search body
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) body() interface{} {
+	return r.source
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_request_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_request_test.go
new file mode 100644
index 000000000..c672b0705
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_request_test.go
@@ -0,0 +1,48 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
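+//
+// Editor's note: in the multi-search ("_msearch") protocol each SearchRequest
+// contributes two newline-delimited JSON objects, the header() output followed
+// by the body() output; the tests below check the header half, e.g.
+//
+//	NewSearchRequest().Index("test") // header: {"index":"test"}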
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+func TestSearchRequestIndex(t *testing.T) {
+	builder := NewSearchRequest().Index("test")
+	data, err := json.Marshal(builder.header())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"index":"test"}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchRequestIndices(t *testing.T) {
+	builder := NewSearchRequest().Index("test", "test2")
+	data, err := json.Marshal(builder.header())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"indices":["test","test2"]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchRequestHasIndices(t *testing.T) {
+	builder := NewSearchRequest()
+	if builder.HasIndices() {
+		t.Errorf("expected HasIndices to return false; got %v", builder.HasIndices())
+	}
+	builder = builder.Index("test", "test2")
+	if !builder.HasIndices() {
+		t.Errorf("expected HasIndices to return true; got %v", builder.HasIndices())
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_source.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_source.go
new file mode 100644
index 000000000..59c9fec67
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_source.go
@@ -0,0 +1,511 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+)
+
+// SearchSource enables users to build the search source.
+// It resembles the SearchSourceBuilder in Elasticsearch.
+type SearchSource struct {
+	query                    Query
+	postQuery                Query
+	from                     int
+	size                     int
+	explain                  *bool
+	version                  *bool
+	sorts                    []SortInfo
+	sorters                  []Sorter
+	trackScores              bool
+	minScore                 *float64
+	timeout                  string
+	terminateAfter           *int
+	fieldNames               []string
+	fieldDataFields          []string
+	scriptFields             []*ScriptField
+	fetchSourceContext       *FetchSourceContext
+	aggregations             map[string]Aggregation
+	highlight                *Highlight
+	globalSuggestText        string
+	suggesters               []Suggester
+	rescores                 []*Rescore
+	defaultRescoreWindowSize *int
+	indexBoosts              map[string]float64
+	stats                    []string
+	innerHits                map[string]*InnerHit
+}
+
+// NewSearchSource initializes a new SearchSource.
+func NewSearchSource() *SearchSource {
+	return &SearchSource{
+		from:            -1,
+		size:            -1,
+		trackScores:     false,
+		sorts:           make([]SortInfo, 0),
+		sorters:         make([]Sorter, 0),
+		fieldDataFields: make([]string, 0),
+		scriptFields:    make([]*ScriptField, 0),
+		aggregations:    make(map[string]Aggregation),
+		rescores:        make([]*Rescore, 0),
+		indexBoosts:     make(map[string]float64),
+		stats:           make([]string, 0),
+		innerHits:       make(map[string]*InnerHit),
+	}
+}
+
+// Query sets the query to use with this search source.
+func (s *SearchSource) Query(query Query) *SearchSource {
+	s.query = query
+	return s
+}
+
+// PostFilter will be executed after the query has been executed and
+// only affects the search hits, not the aggregations.
+// This filter is always executed as the last filtering mechanism.
+func (s *SearchSource) PostFilter(postFilter Query) *SearchSource {
+	s.postQuery = postFilter
+	return s
+}
+
+// From index to start the search from. Defaults to 0.
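+//
+// Editor's sketch (not part of the upstream source): pagination combines
+// From and Size,
+//
+//	ss := NewSearchSource().Query(NewTermQuery("user", "olivere")).From(20).Size(10)
+//
+// which asks for hits 21-30.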
+func (s *SearchSource) From(from int) *SearchSource {
+	s.from = from
+	return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchSource) Size(size int) *SearchSource {
+	s.size = size
+	return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchSource) MinScore(minScore float64) *SearchSource {
+	s.minScore = &minScore
+	return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchSource) Explain(explain bool) *SearchSource {
+	s.explain = &explain
+	return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchSource) Version(version bool) *SearchSource {
+	s.version = &version
+	return s
+}
+
+// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms".
+func (s *SearchSource) Timeout(timeout string) *SearchSource {
+	s.timeout = timeout
+	return s
+}
+
+// TimeoutInMillis controls how many milliseconds a search is allowed
+// to take before it is canceled.
+func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
+	s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+	return s
+}
+
+// TerminateAfter allows the request to stop after the given number
+// of search hits are collected.
+func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource {
+	s.terminateAfter = &terminateAfter
+	return s
+}
+
+// Sort adds a sort order.
+func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
+	s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending})
+	return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
+	s.sorts = append(s.sorts, info)
+	return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
+	s.sorters = append(s.sorters, sorter...)
+	return s
+}
+
+func (s *SearchSource) hasSort() bool {
+	return len(s.sorts) > 0 || len(s.sorters) > 0
+}
+
+// TrackScores is applied when sorting and controls if scores will be
+// tracked as well. Defaults to false.
+func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
+	s.trackScores = trackScores
+	return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
+	s.aggregations[name] = aggregation
+	return s
+}
+
+// DefaultRescoreWindowSize sets the rescore window size for rescores
+// that don't specify their window.
+func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
+	s.defaultRescoreWindowSize = &defaultRescoreWindowSize
+	return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
+	s.highlight = highlight
+	return s
+}
+
+// Highlighter returns the highlighter.
+func (s *SearchSource) Highlighter() *Highlight {
+	if s.highlight == nil {
+		s.highlight = NewHighlight()
+	}
+	return s.highlight
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchSource) GlobalSuggestText(text string) *SearchSource {
+	s.globalSuggestText = text
+	return s
+}
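Named aggregations ride along in the same source map as the query. A hedged, self-contained sketch, assuming NewTermsAggregation (provided by this library outside this patch) and a made-up user field:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	// Aggregation-only request: size 0 skips hit collection.
	src, err := elastic.NewSearchSource().
		Query(elastic.NewMatchAllQuery()).
		Aggregation("by_user", elastic.NewTermsAggregation().Field("user")).
		Size(0).
		Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"aggregations":{"by_user":{"terms":{"field":"user"}}},"query":{"match_all":{}},"size":0}
}

+// Suggester adds a suggester to the search.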
+func (s *SearchSource) Suggester(suggester Suggester) *SearchSource {
+	s.suggesters = append(s.suggesters, suggester)
+	return s
+}
+
+// Rescorer adds a rescorer to the search.
+func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource {
+	s.rescores = append(s.rescores, rescore)
+	return s
+}
+
+// ClearRescorers removes all rescorers from the search.
+func (s *SearchSource) ClearRescorers() *SearchSource {
+	s.rescores = make([]*Rescore, 0)
+	return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource {
+	if s.fetchSourceContext == nil {
+		s.fetchSourceContext = NewFetchSourceContext(fetchSource)
+	} else {
+		s.fetchSourceContext.SetFetchSource(fetchSource)
+	}
+	return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource {
+	s.fetchSourceContext = fetchSourceContext
+	return s
+}
+
+// NoFields indicates that no fields should be loaded, resulting in only
+// id and type to be returned per hit.
+func (s *SearchSource) NoFields() *SearchSource {
+	s.fieldNames = make([]string, 0)
+	return s
+}
+
+// Field adds a single field to load and return (note, it must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
+func (s *SearchSource) Field(fieldName string) *SearchSource {
+	if s.fieldNames == nil {
+		s.fieldNames = make([]string, 0)
+	}
+	s.fieldNames = append(s.fieldNames, fieldName)
+	return s
+}
+
+// Fields sets the fields to load and return as part of the search request.
+// If none are specified, the source of the document will be returned.
+func (s *SearchSource) Fields(fieldNames ...string) *SearchSource {
+	if s.fieldNames == nil {
+		s.fieldNames = make([]string, 0)
+	}
+	s.fieldNames = append(s.fieldNames, fieldNames...)
+	return s
+}
+
+// FieldDataField adds a single field to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) FieldDataField(fieldDataField string) *SearchSource {
+	s.fieldDataFields = append(s.fieldDataFields, fieldDataField)
+	return s
+}
+
+// FieldDataFields adds one or more fields to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) FieldDataFields(fieldDataFields ...string) *SearchSource {
+	s.fieldDataFields = append(s.fieldDataFields, fieldDataFields...)
+	return s
+}
+
+// ScriptField adds a single script field with the provided script.
+func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource {
+	s.scriptFields = append(s.scriptFields, scriptField)
+	return s
+}
+
+// ScriptFields adds one or more script fields with the provided scripts.
+func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource {
+	s.scriptFields = append(s.scriptFields, scriptFields...)
+	return s
+}
+
+// IndexBoost sets the boost that a specific index will receive when the
+// query is executed against it.
+func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource {
+	s.indexBoosts[index] = boost
+	return s
+}
+
+// Stats sets the stats groups this request will be aggregated under.
+func (s *SearchSource) Stats(statsGroup ...string) *SearchSource {
+	s.stats = append(s.stats, statsGroup...)
+	return s
+}
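Source filtering can be finer-grained than FetchSource's on/off switch. A self-contained sketch using include/exclude wildcard patterns, mirroring the wildcard test later in this patch (the object and field names are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	// Keep obj1.* and obj2.*, but strip any description subfield.
	fsc := elastic.NewFetchSourceContext(true).
		Include("obj1.*", "obj2.*").
		Exclude("*.description")
	src, err := elastic.NewSearchSource().
		Query(elastic.NewMatchAllQuery()).
		FetchSourceContext(fsc).
		Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"_source":{"excludes":["*.description"],"includes":["obj1.*","obj2.*"]},"query":{"match_all":{}}}
}

+// InnerHit adds an inner hit to return with the result.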
+func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource { + s.innerHits[name] = innerHit + return s +} + +// Source returns the serializable JSON for the source builder. +func (s *SearchSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if s.from != -1 { + source["from"] = s.from + } + if s.size != -1 { + source["size"] = s.size + } + if s.timeout != "" { + source["timeout"] = s.timeout + } + if s.terminateAfter != nil { + source["terminate_after"] = *s.terminateAfter + } + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + source["query"] = src + } + if s.postQuery != nil { + src, err := s.postQuery.Source() + if err != nil { + return nil, err + } + source["post_filter"] = src + } + if s.minScore != nil { + source["min_score"] = *s.minScore + } + if s.version != nil { + source["version"] = *s.version + } + if s.explain != nil { + source["explain"] = *s.explain + } + if s.fetchSourceContext != nil { + src, err := s.fetchSourceContext.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + + if s.fieldNames != nil { + switch len(s.fieldNames) { + case 1: + source["fields"] = s.fieldNames[0] + default: + source["fields"] = s.fieldNames + } + } + + if len(s.fieldDataFields) > 0 { + source["fielddata_fields"] = s.fieldDataFields + } + + if len(s.scriptFields) > 0 { + sfmap := make(map[string]interface{}) + for _, scriptField := range s.scriptFields { + src, err := scriptField.Source() + if err != nil { + return nil, err + } + sfmap[scriptField.FieldName] = src + } + source["script_fields"] = sfmap + } + + if len(s.sorters) > 0 { + sortarr := make([]interface{}, 0) + for _, sorter := range s.sorters { + src, err := sorter.Source() + if err != nil { + return nil, err + } + sortarr = append(sortarr, src) + } + source["sort"] = sortarr + } else if len(s.sorts) > 0 { + sortarr := make([]interface{}, 0) + for _, sort := range s.sorts { + src, err := sort.Source() + if err != nil { + return nil, err + } + sortarr = append(sortarr, src) + } + source["sort"] = sortarr + } + + if s.trackScores { + source["track_scores"] = s.trackScores + } + + if len(s.indexBoosts) > 0 { + source["indices_boost"] = s.indexBoosts + } + + if len(s.aggregations) > 0 { + aggsMap := make(map[string]interface{}) + for name, aggregate := range s.aggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + source["aggregations"] = aggsMap + } + + if s.highlight != nil { + src, err := s.highlight.Source() + if err != nil { + return nil, err + } + source["highlight"] = src + } + + if len(s.suggesters) > 0 { + suggesters := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + suggesters[s.Name()] = src + } + if s.globalSuggestText != "" { + suggesters["text"] = s.globalSuggestText + } + source["suggest"] = suggesters + } + + if len(s.rescores) > 0 { + // Strip empty rescores from request + rescores := make([]*Rescore, 0) + for _, r := range s.rescores { + if !r.IsEmpty() { + rescores = append(rescores, r) + } + } + + if len(rescores) == 1 { + rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize + src, err := rescores[0].Source() + if err != nil { + return nil, err + } + source["rescore"] = src + } else { + slice := make([]interface{}, 0) + for _, r := range rescores { + r.defaultRescoreWindowSize = s.defaultRescoreWindowSize + src, err := r.Source() + if err 
!= nil {
+				return nil, err
+			}
+			slice = append(slice, src)
+		}
+		source["rescore"] = slice
+	}
+	}
+
+	if len(s.stats) > 0 {
+		source["stats"] = s.stats
+	}
+
+	if len(s.innerHits) > 0 {
+		// Top-level inner hits
+		// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
+		// "inner_hits": {
+		//   "<inner_hits_name>": {
+		//     "<path|type>": {
+		//       "<path-or-type-name>": {
+		//         <inner_hits_body>
+		//         [,"inner_hits" : { [<sub_inner_hits>]+ } ]?
+		//       }
+		//     }
+		//   },
+		//   [,"<inner_hits_name_2>" : { ... } ]*
+		// }
+		m := make(map[string]interface{})
+		for name, hit := range s.innerHits {
+			if hit.path != "" {
+				src, err := hit.Source()
+				if err != nil {
+					return nil, err
+				}
+				path := make(map[string]interface{})
+				path[hit.path] = src
+				m[name] = map[string]interface{}{
+					"path": path,
+				}
+			} else if hit.typ != "" {
+				src, err := hit.Source()
+				if err != nil {
+					return nil, err
+				}
+				typ := make(map[string]interface{})
+				typ[hit.typ] = src
+				m[name] = map[string]interface{}{
+					"type": typ,
+				}
+			} else {
+				// TODO the Java client throws here, because either path or typ must be specified
+			}
+		}
+		source["inner_hits"] = m
+	}
+
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_source_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_source_test.go
new file mode 100644
index 000000000..b5ddf61af
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_source_test.go
@@ -0,0 +1,238 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestSearchSourceMatchAllQuery(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ)
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceNoFields(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).NoFields()
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":[],"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceFields(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).Fields("message", "tags")
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":["message","tags"],"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceFetchSourceDisabled(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).FetchSource(false)
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := 
`{"_source":false,"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceFetchSourceByWildcards(t *testing.T) { + matchAllQ := NewMatchAllQuery() + fsc := NewFetchSourceContext(true).Include("obj1.*", "obj2.*").Exclude("*.description") + builder := NewSearchSource().Query(matchAllQ).FetchSourceContext(fsc) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_source":{"excludes":["*.description"],"includes":["obj1.*","obj2.*"]},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceFieldDataFields(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ).FieldDataFields("test1", "test2") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fielddata_fields":["test1","test2"],"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceScriptFields(t *testing.T) { + matchAllQ := NewMatchAllQuery() + sf1 := NewScriptField("test1", NewScript("doc['my_field_name'].value * 2")) + sf2 := NewScriptField("test2", NewScript("doc['my_field_name'].value * factor").Param("factor", 3.1415927)) + builder := NewSearchSource().Query(matchAllQ).ScriptFields(sf1, sf2) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":"doc['my_field_name'].value * 2"},"test2":{"script":{"inline":"doc['my_field_name'].value * factor","params":{"factor":3.1415927}}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourcePostFilter(t *testing.T) { + matchAllQ := NewMatchAllQuery() + pf := NewTermQuery("tag", "important") + builder := NewSearchSource().Query(matchAllQ).PostFilter(pf) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"post_filter":{"term":{"tag":"important"}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceHighlight(t *testing.T) { + matchAllQ := NewMatchAllQuery() + hl := NewHighlight().Field("content") + builder := NewSearchSource().Query(matchAllQ).Highlight(hl) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"highlight":{"fields":{"content":{}}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceRescoring(t *testing.T) { + matchAllQ := NewMatchAllQuery() + rescorerQuery := NewMatchQuery("field1", "the quick brown fox").Type("phrase").Slop(2) + rescorer := NewQueryRescorer(rescorerQuery) + rescorer = rescorer.QueryWeight(0.7) + rescorer = rescorer.RescoreQueryWeight(1.2) + rescore := 
NewRescore().WindowSize(50).Rescorer(rescorer) + builder := NewSearchSource().Query(matchAllQ).Rescorer(rescore) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query":{"match_all":{}},"rescore":{"query":{"query_weight":0.7,"rescore_query":{"match":{"field1":{"query":"the quick brown fox","slop":2,"type":"phrase"}}},"rescore_query_weight":1.2},"window_size":50}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceIndexBoost(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ).IndexBoost("index1", 1.4).IndexBoost("index2", 1.3) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"indices_boost":{"index1":1.4,"index2":1.3},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceInnerHits(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ). + InnerHit("comments", NewInnerHit().Type("comment").Query(NewMatchQuery("user", "olivere"))). + InnerHit("views", NewInnerHit().Path("view")) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"inner_hits":{"comments":{"type":{"comment":{"query":{"match":{"user":{"query":"olivere"}}}}}},"views":{"path":{"view":{}}}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_suggester_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_suggester_test.go new file mode 100644 index 000000000..02c552af2 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_suggester_test.go @@ -0,0 +1,259 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + _ "encoding/json" + _ "net/http" + "testing" +) + +func TestTermSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + tsName := "my-suggestions" + ts := NewTermSuggester(tsName) + ts = ts.Text("Goolang") + ts = ts.Field("message") + + searchResult, err := client.Search(). 
+ Index(testIndexName). + Query(all). + Suggester(ts). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Suggest == nil { + t.Errorf("expected SearchResult.Suggest != nil; got nil") + } + mySuggestions, found := searchResult.Suggest[tsName] + if !found { + t.Errorf("expected to find SearchResult.Suggest[%s]; got false", tsName) + } + if mySuggestions == nil { + t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", tsName) + } + + if len(mySuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) + } + mySuggestion := mySuggestions[0] + if mySuggestion.Text != "goolang" { + t.Errorf("expected Text = 'goolang'; got %s", mySuggestion.Text) + } + if mySuggestion.Offset != 0 { + t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) + } + if mySuggestion.Length != 7 { + t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) + } + if len(mySuggestion.Options) != 1 { + t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) + } + myOption := mySuggestion.Options[0] + if myOption.Text != "golang" { + t.Errorf("expected Text = 'golang'; got %s", myOption.Text) + } + if myOption.Score == float64(0.0) { + t.Errorf("expected Score != 0.0; got %v", myOption.Score) + } + if myOption.Freq == 0 { + t.Errorf("expected Freq != 0; got %v", myOption.Freq) + } +} + +func TestPhraseSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + phraseSuggesterName := "my-suggestions" + ps := NewPhraseSuggester(phraseSuggesterName) + ps = ps.Text("Goolang") + ps = ps.Field("message") + + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Suggester(ps). 
+ Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Suggest == nil { + t.Errorf("expected SearchResult.Suggest != nil; got nil") + } + mySuggestions, found := searchResult.Suggest[phraseSuggesterName] + if !found { + t.Errorf("expected to find SearchResult.Suggest[%s]; got false", phraseSuggesterName) + } + if mySuggestions == nil { + t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", phraseSuggesterName) + } + + if len(mySuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) + } + mySuggestion := mySuggestions[0] + if mySuggestion.Text != "Goolang" { + t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text) + } + if mySuggestion.Offset != 0 { + t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) + } + if mySuggestion.Length != 7 { + t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) + } + /* + if len(mySuggestion.Options) != 1 { + t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) + } + myOption := mySuggestion.Options[0] + if myOption.Text != "golang" { + t.Errorf("expected Text = 'golang'; got %s", myOption.Text) + } + if myOption.Score == float64(0.0) { + t.Errorf("expected Score != 0.0; got %v", myOption.Score) + } + */ +} + +// TODO(oe): I get a "Completion suggester not supported" exception on 0.90.2?! +/* +func TestCompletionSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + suggesterName := "my-suggestions" + cs := NewCompletionSuggester(suggesterName) + cs = cs.Text("Goolang") + cs = cs.Field("message") + + searchResult, err := client.Search(). + Index(testIndexName). + Query(&all). + Suggester(cs). 
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Suggest == nil {
+		t.Errorf("expected SearchResult.Suggest != nil; got nil")
+	}
+	mySuggestions, found := searchResult.Suggest[suggesterName]
+	if !found {
+		t.Errorf("expected to find SearchResult.Suggest[%s]; got false", suggesterName)
+	}
+	if mySuggestions == nil {
+		t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName)
+	}
+
+	if len(mySuggestions) != 1 {
+		t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+	}
+	mySuggestion := mySuggestions[0]
+	if mySuggestion.Text != "Goolang" {
+		t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text)
+	}
+	if mySuggestion.Offset != 0 {
+		t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+	}
+	if mySuggestion.Length != 7 {
+		t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
+	}
+	if len(mySuggestion.Options) != 1 {
+		t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+	}
+	myOption := mySuggestion.Options[0]
+	if myOption.Text != "golang" {
+		t.Errorf("expected Text = 'golang'; got %s", myOption.Text)
+	}
+	if myOption.Score == float64(0.0) {
+		t.Errorf("expected Score != 0.0; got %v", myOption.Score)
+	}
+}
+//*/
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_template.go
new file mode 100644
index 000000000..229a2712b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_template.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// PutTemplateService creates or updates a search template.
+// The documentation can be found at
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type PutTemplateService struct {
+	client      *Client
+	pretty      bool
+	id          string
+	opType      string
+	version     *int
+	versionType string
+	bodyJson    interface{}
+	bodyString  string
+}
+
+// NewPutTemplateService creates a new PutTemplateService.
+func NewPutTemplateService(client *Client) *PutTemplateService {
+	return &PutTemplateService{
+		client: client,
+	}
+}
+
+// Id is the template ID.
+func (s *PutTemplateService) Id(id string) *PutTemplateService {
+	s.id = id
+	return s
+}
+
+// OpType is an explicit operation type.
+func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
+	s.opType = opType
+	return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *PutTemplateService) Version(version int) *PutTemplateService {
+	s.version = &version
+	return s
+}
+
+// VersionType is a specific version type.
+func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
+	s.versionType = versionType
+	return s
+}
+
+// BodyJson is the document as a JSON serializable object.
+func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString is the document as a string.
+func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
+	s.bodyString = body
+	return s
+}
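For orientation, a hypothetical same-package sketch of what the setters above translate to once buildURL (defined next) runs. buildURL is unexported, so this would have to live inside package elastic (e.g. in a test); the id, version, and op_type values are made up, and the comments show the expected results rather than captured output:

// Configure a template PUT and inspect the URL parts it would use.
s := client.PutTemplate().Id("tmpl-1").Version(2).OpType("create")
path, params, err := s.buildURL()
if err != nil {
	// handle the error
}
fmt.Println(path)   // /_search/template/tmpl-1
fmt.Println(params) // map[op_type:[create] version:[2]]

+// buildURL builds the URL for the operation.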
+func (s *PutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *PutTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *PutTemplateService) Do() (*PutTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PutTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PutTemplateResponse is the response of PutTemplateService.Do. +type PutTemplateResponse struct { + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_templates_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_templates_test.go new file mode 100644 index 000000000..3f8bbcb65 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_templates_test.go @@ -0,0 +1,98 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
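Worth noting before the tests: Do runs Validate before any HTTP traffic, so a half-configured service fails fast with a descriptive error. A minimal sketch, assuming an existing *Client named client; the printed message follows from the Validate implementation above:

// Neither Id nor a body has been set, so validation fails.
err := client.PutTemplate().Validate()
fmt.Println(err) // missing required fields: [Id BodyJson]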
+ +package elastic + +import ( + "testing" +) + +func TestSearchTemplatesLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Template + tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}` + + // Create template + cresp, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do() + if err != nil { + t.Fatal(err) + } + if cresp == nil { + t.Fatalf("expected response != nil; got: %v", cresp) + } + if !cresp.Created { + t.Errorf("expected created = %v; got: %v", true, cresp.Created) + } + + // Get template + resp, err := client.GetTemplate().Id("elastic-test").Do() + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected response != nil; got: %v", resp) + } + if resp.Template == "" { + t.Errorf("expected template != %q; got: %q", "", resp.Template) + } + + // Delete template + dresp, err := client.DeleteTemplate().Id("elastic-test").Do() + if err != nil { + t.Fatal(err) + } + if dresp == nil { + t.Fatalf("expected response != nil; got: %v", dresp) + } + if !dresp.Found { + t.Fatalf("expected found = %v; got: %v", true, dresp.Found) + } +} + +func TestSearchTemplatesInlineQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Run query with (inline) search template + // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html + tq := NewTemplateQuery(`{"match_{{template}}": {}}`).Var("template", "all") + resp, err := client.Search(testIndexName).Query(tq).Do() + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected response != nil; got: %v", resp) + } + if resp.Hits == nil { + t.Fatalf("expected response hits != nil; got: %v", resp.Hits) + } + if resp.Hits.TotalHits != 3 { + t.Fatalf("expected 3 hits; got: %d", resp.Hits.TotalHits) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_test.go new file mode 100644 index 000000000..43a6695ff --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/search_test.go @@ -0,0 +1,885 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + _ "net/http" + "reflect" + "testing" + "time" +) + +func TestSearchMatchAll(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + // Match all should return all documents + searchResult, err := client.Search(). + Index(testIndexName). + Query(NewMatchAllQuery()). + Size(100). + Pretty(true). 
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if got, want := searchResult.Hits.TotalHits, int64(12); got != want {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
+	}
+	if got, want := len(searchResult.Hits.Hits), 12; got != want {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
+	}
+
+	for _, hit := range searchResult.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		item := make(map[string]interface{})
+		err := json.Unmarshal(*hit.Source, &item)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkSearchMatchAll(b *testing.B) {
+	client := setupTestClientAndCreateIndexAndAddDocs(b)
+
+	for n := 0; n < b.N; n++ {
+		// Match all should return all documents
+		all := NewMatchAllQuery()
+		searchResult, err := client.Search().Index(testIndexName).Query(all).Do()
+		if err != nil {
+			b.Fatal(err)
+		}
+		if searchResult.Hits == nil {
+			b.Errorf("expected SearchResult.Hits != nil; got nil")
+		}
+		// 12 documents are indexed; without an explicit size, the default
+		// page returns 10 of them.
+		if searchResult.Hits.TotalHits != 12 {
+			b.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 12, searchResult.Hits.TotalHits)
+		}
+		if len(searchResult.Hits.Hits) != 10 {
+			b.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 10, len(searchResult.Hits.Hits))
+		}
+	}
+}
+
+func TestSearchResultTotalHits(t *testing.T) {
+	client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	all := NewMatchAllQuery()
+	searchResult, err := client.Search().Index(testIndexName).Query(all).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	got := searchResult.TotalHits()
+	if got != count {
+		t.Fatalf("expected %d hits; got: %d", count, got)
+	}
+
+	// No hits
+	searchResult = &SearchResult{}
+	got = searchResult.TotalHits()
+	if got != 0 {
+		t.Errorf("expected %d hits; got: %d", 0, got)
+	}
+}
+
+func TestSearchResultEach(t *testing.T) {
+	client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+	all := NewMatchAllQuery()
+	searchResult, err := client.Search().Index(testIndexName).Query(all).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Iterate over non-ptr type
+	var aTweet tweet
+	count := 0
+	for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+		count++
+		_, ok := item.(tweet)
+		if !ok {
+			t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+		}
+	}
+	if count == 0 {
+		t.Errorf("expected to find some hits; got: %d", count)
+	}
+
+	// Iterate over ptr-type
+	count = 0
+	var aTweetPtr *tweet
+	for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) {
+		count++
+		tw, ok := item.(*tweet)
+		if !ok {
+			t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+		}
+		if tw == nil {
+			t.Fatal("expected hit to not be nil")
+		}
+	}
+	if count == 0 {
+		t.Errorf("expected to find some hits; got: %d", count)
+	}
+
+	// Does not iterate when no hits are found
+	searchResult = &SearchResult{Hits: nil}
+	count = 0
+	for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+		count++
+		_ = item
+	}
+	if count != 0 {
+		t.Errorf("expected to not find any hits; got: %d", count)
+	}
+	searchResult = &SearchResult{Hits: &SearchHits{Hits: make([]*SearchHit, 0)}}
+	count = 0
+	for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+		count++
+		_ = item
+	}
+	if count != 0 {
+		t.Errorf("expected to not find any 
hits; got: %d", count) + } +} + +func TestSearchSorting(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Sort("created", false). + Timeout("1s"). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestSearchSortingBySorters(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + SortBy(NewFieldSort("created").Desc(), NewScoreSort()). + Timeout("1s"). 
+ Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestSearchSpecificFields(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Fields("message"). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + if hit.Source != nil { + t.Fatalf("expected SearchResult.Hits.Hit.Source to be nil; got: %q", hit.Source) + } + if hit.Fields == nil { + t.Fatal("expected SearchResult.Hits.Hit.Fields to be != nil") + } + field, found := hit.Fields["message"] + if !found { + t.Errorf("expected SearchResult.Hits.Hit.Fields[%s] to be found", "message") + } + fields, ok := field.([]interface{}) + if !ok { + t.Errorf("expected []interface{}; got: %v", reflect.TypeOf(fields)) + } + if len(fields) != 1 { + t.Errorf("expected a field with 1 entry; got: %d", len(fields)) + } + message, ok := fields[0].(string) + if !ok { + t.Errorf("expected a string; got: %v", reflect.TypeOf(fields[0])) + } + if message == "" { + t.Errorf("expected a message; got: %q", message) + } + } +} + +func TestSearchExplain(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling 
is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Explain(true). + Timeout("1s"). + // Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + if hit.Explanation == nil { + t.Fatal("expected search explanation") + } + if hit.Explanation.Value <= 0.0 { + t.Errorf("expected explanation value to be > 0.0; got: %v", hit.Explanation.Value) + } + if hit.Explanation.Description == "" { + t.Errorf("expected explanation description != %q; got: %q", "", hit.Explanation.Description) + } + } +} + +func TestSearchSource(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Set up the request JSON manually to pass to the search service via Source() + source := map[string]interface{}{ + "query": map[string]interface{}{ + "match_all": map[string]interface{}{}, + }, + } + + searchResult, err := client.Search(). + Index(testIndexName). + Source(source). 
// sets the JSON request + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } +} + +func TestSearchSearchSource(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Set up the search source manually and pass it to the search service via SearchSource() + ss := NewSearchSource().Query(NewMatchAllQuery()).From(0).Size(2) + + // One can use ss.Source() to get to the raw interface{} that will be used + // as the search request JSON by the SearchService. + + searchResult, err := client.Search(). + Index(testIndexName). + SearchSource(ss). // sets the SearchSource + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 2 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits)) + } +} + +func TestSearchInnerHitsOnHasChild(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Check for valid ES version + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.5.0" { + t.Skip("InnerHits feature is only available for Elasticsearch 1.5+") + return + } + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + comment2a := comment{User: "sandrae", Comment: "What does that even mean?"} + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + comment3a := comment{User: "nico", Comment: "You bet."} + comment3b := comment{User: "olivere", Comment: "It sure is."} + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + _, err = 
client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + bq := NewBoolQuery() + bq = bq.Must(NewMatchAllQuery()) + bq = bq.Filter(NewHasChildQuery("comment", NewMatchAllQuery()). + InnerHit(NewInnerHit().Name("comments"))) + + searchResult, err := client.Search(). + Index(testIndexName). + Query(bq). + Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 2 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 2, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 2 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits)) + } + + hit := searchResult.Hits.Hits[0] + if hit.Id != "t2" { + t.Fatalf("expected tweet %q; got: %q", "t2", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found := hit.InnerHits["comments"] + if !found { + t.Fatalf("expected inner hits for name %q", "comments") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "c2a" { + t.Fatalf("expected inner hit with id %q; got: %q", "c2a", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[1] + if hit.Id != "t3" { + t.Fatalf("expected tweet %q; got: %q", "t3", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["comments"] + if !found { + t.Fatalf("expected inner hits for name %q", "comments") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 2 { + t.Fatalf("expected %d inner hits; got: %d", 2, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "c3a" { + t.Fatalf("expected inner hit with id %q; got: %q", "c3a", innerHits.Hits.Hits[0].Id) + } + if innerHits.Hits.Hits[1].Id != "c3b" { + t.Fatalf("expected inner hit with id %q; got: %q", "c3b", innerHits.Hits.Hits[1].Id) + } +} + +func TestSearchInnerHitsOnHasParent(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Check for valid ES version + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.5.0" { + t.Skip("InnerHits feature is only available for Elasticsearch 1.5+") + return + } + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := 
tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + comment2a := comment{User: "sandrae", Comment: "What does that even mean?"} + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + comment3a := comment{User: "nico", Comment: "You bet."} + comment3b := comment{User: "olivere", Comment: "It sure is."} + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + bq := NewBoolQuery() + bq = bq.Must(NewMatchAllQuery()) + bq = bq.Filter(NewHasParentQuery("tweet", NewMatchAllQuery()). + InnerHit(NewInnerHit().Name("tweets"))) + + searchResult, err := client.Search(). + Index(testIndexName). + Query(bq). + Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + hit := searchResult.Hits.Hits[0] + if hit.Id != "c2a" { + t.Fatalf("expected tweet %q; got: %q", "c2a", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found := hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "t2" { + t.Fatalf("expected inner hit with id %q; got: %q", "t2", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[1] + if hit.Id != "c3a" { + t.Fatalf("expected tweet %q; got: %q", "c3a", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if 
innerHits.Hits.Hits[0].Id != "t3" { + t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[2] + if hit.Id != "c3b" { + t.Fatalf("expected tweet %q; got: %q", "c3b", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "t3" { + t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/setup_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/setup_test.go new file mode 100644 index 000000000..97af2bb27 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/setup_test.go @@ -0,0 +1,232 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "log" + "math/rand" + "os" + "time" +) + +const ( + testIndexName = "elastic-test" + testIndexName2 = "elastic-test2" + testMapping = ` +{ + "settings":{ + "number_of_shards":1, + "number_of_replicas":0 + }, + "mappings":{ + "_default_": { + "_timestamp": { + "enabled": true + }, + "_ttl": { + "enabled": true + } + }, + "tweet":{ + "properties":{ + "tags":{ + "type":"string" + }, + "location":{ + "type":"geo_point" + }, + "suggest_field":{ + "type":"completion", + "payloads":true + } + } + }, + "comment":{ + "_parent": { + "type": "tweet" + } + }, + "order":{ + "properties":{ + "article":{ + "type":"string" + }, + "manufacturer":{ + "type":"string", + "index" : "not_analyzed" + }, + "price":{ + "type":"float" + }, + "time":{ + "type":"date", + "format": "YYYY-MM-dd" + } + } + } + } +} +` +) + +type tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *SuggestField `json:"suggest_field,omitempty"` +} + +func (t tweet) String() string { + return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets) +} + +type comment struct { + User string `json:"user"` + Comment string `json:"comment"` + Created time.Time `json:"created,omitempty"` +} + +func (c comment) String() string { + return fmt.Sprintf("comment{User:%q,Comment:%q}", c.User, c.Comment) +} + +type order struct { + Article string `json:"article"` + Manufacturer string `json:"manufacturer"` + Price float64 `json:"price"` + Time string `json:"time,omitempty"` +} + +func (o order) String() string { + return fmt.Sprintf("order{Article:%q,Manufacturer:%q,Price:%v,Time:%v}", o.Article, o.Manufacturer, o.Price, o.Time) +} + +func isTravis() bool { + return os.Getenv("TRAVIS") != "" +} + +func travisGoVersion() string { + return os.Getenv("TRAVIS_GO_VERSION") +} + +type logger interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + 
Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fail() + FailNow() + Log(args ...interface{}) + Logf(format string, args ...interface{}) +} + +func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) { + var err error + + client, err = NewClient(options...) + if err != nil { + t.Fatal(err) + } + + client.DeleteIndex(testIndexName).Do() + client.DeleteIndex(testIndexName2).Do() + + return client +} + +func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client { + client := setupTestClient(t, options...) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex) + } + + // Create second index + createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do() + if err != nil { + t.Fatal(err) + } + if createIndex2 == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex2) + } + + return client +} + +func setupTestClientAndCreateIndexAndLog(t logger, options ...ClientOptionFunc) *Client { + return setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) +} + +func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client { + client := setupTestClientAndCreateIndex(t, options...) + + // Add tweets + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + comment1 := comment{User: "nico", Comment: "You bet."} + + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do() + if err != nil { + t.Fatal(err) + } + + // Add orders + var orders []order + orders = append(orders, order{Article: "Apple MacBook", Manufacturer: "Apple", Price: 1290, Time: "2015-01-18"}) + orders = append(orders, order{Article: "Paper", Manufacturer: "Canon", Price: 100, Time: "2015-03-01"}) + orders = append(orders, order{Article: "Apple iPad", Manufacturer: "Apple", Price: 499, Time: "2015-04-12"}) + orders = append(orders, order{Article: "Dell XPS 13", Manufacturer: "Dell", Price: 1600, Time: "2015-04-18"}) + orders = append(orders, order{Article: "Apple Watch", Manufacturer: "Apple", Price: 349, Time: "2015-04-29"}) + orders = append(orders, order{Article: "Samsung TV", Manufacturer: "Samsung", Price: 790, Time: "2015-05-03"}) + orders = append(orders, order{Article: "Hoodie", Manufacturer: "h&m", Price: 49, Time: "2015-06-03"}) + orders = append(orders, order{Article: "T-Shirt", Manufacturer: "h&m", Price: 19, Time: "2015-06-18"}) + for i, o := range orders { + id := fmt.Sprintf("%d", i) + _, err = client.Index().Index(testIndexName).Type("order").Id(id).BodyJson(&o).Do() + if err != nil { + t.Fatal(err) + } + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + return client +} + +var letters = 
[]rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomString(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/sort.go b/services/templeton/vendor/src/github.com/olivere/elastic/sort.go new file mode 100644 index 000000000..4c845c505 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/sort.go @@ -0,0 +1,480 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// -- Sorter -- + +// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html. +type Sorter interface { + Source() (interface{}, error) +} + +// -- SortInfo -- + +// SortInfo contains information about sorting a field. +type SortInfo struct { + Sorter + Field string + Ascending bool + Missing interface{} + IgnoreUnmapped *bool + SortMode string + NestedFilter Query + NestedPath string +} + +func (info SortInfo) Source() (interface{}, error) { + prop := make(map[string]interface{}) + if info.Ascending { + prop["order"] = "asc" + } else { + prop["order"] = "desc" + } + if info.Missing != nil { + prop["missing"] = info.Missing + } + if info.IgnoreUnmapped != nil { + prop["ignore_unmapped"] = *info.IgnoreUnmapped + } + if info.SortMode != "" { + prop["sort_mode"] = info.SortMode + } + if info.NestedFilter != nil { + prop["nested_filter"] = info.NestedFilter + } + if info.NestedPath != "" { + prop["nested_path"] = info.NestedPath + } + source := make(map[string]interface{}) + source[info.Field] = prop + return source, nil +} + +// -- ScoreSort -- + +// ScoreSort sorts by relevancy score. +type ScoreSort struct { + Sorter + ascending bool +} + +// NewScoreSort creates a new ScoreSort. +func NewScoreSort() ScoreSort { + return ScoreSort{ascending: false} // Descending by default! +} + +// Order defines whether sorting ascending (default) or descending. +func (s ScoreSort) Order(ascending bool) ScoreSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s ScoreSort) Asc() ScoreSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s ScoreSort) Desc() ScoreSort { + s.ascending = false + return s +} + +// Source returns the JSON-serializable data. +func (s ScoreSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_score"] = x + if s.ascending { + x["reverse"] = true + } + return source, nil +} + +// -- FieldSort -- + +// FieldSort sorts by a given field. +type FieldSort struct { + Sorter + fieldName string + ascending bool + missing interface{} + ignoreUnmapped *bool + unmappedType *string + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewFieldSort creates a new FieldSort. +func NewFieldSort(fieldName string) FieldSort { + return FieldSort{ + fieldName: fieldName, + ascending: true, + } +} + +// FieldName specifies the name of the field to be used for sorting. +func (s FieldSort) FieldName(fieldName string) FieldSort { + s.fieldName = fieldName + return s +} + +// Order defines whether sorting ascending (default) or descending. 
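+// For example (an illustrative sketch, not upstream documentation):
+//
+//	sorter := NewFieldSort("grade").Order(false) // same as NewFieldSort("grade").Desc()
+//
+// which serializes to {"grade":{"order":"desc"}}, as exercised by the tests
+// in sort_test.go below.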
+func (s FieldSort) Order(ascending bool) FieldSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s FieldSort) Asc() FieldSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s FieldSort) Desc() FieldSort { + s.ascending = false + return s +} + +// Missing sets the value to be used when a field is missing in a document. +// You can also use "_last" or "_first" to sort missing last or first +// respectively. +func (s FieldSort) Missing(missing interface{}) FieldSort { + s.missing = missing + return s +} + +// IgnoreUnmapped specifies what happens if the field does not exist in +// the index. Set it to true to ignore, or set it to false to not ignore (default). +func (s FieldSort) IgnoreUnmapped(ignoreUnmapped bool) FieldSort { + s.ignoreUnmapped = &ignoreUnmapped + return s +} + +// UnmappedType sets the type to use when the current field is not mapped +// in an index. +func (s FieldSort) UnmappedType(typ string) FieldSort { + s.unmappedType = &typ + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min, max, sum, and avg. +func (s FieldSort) SortMode(sortMode string) FieldSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s FieldSort) NestedFilter(nestedFilter Query) FieldSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s FieldSort) NestedPath(nestedPath string) FieldSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s FieldSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source[s.fieldName] = x + if s.ascending { + x["order"] = "asc" + } else { + x["order"] = "desc" + } + if s.missing != nil { + x["missing"] = s.missing + } + if s.ignoreUnmapped != nil { + x["ignore_unmapped"] = *s.ignoreUnmapped + } + if s.unmappedType != nil { + x["unmapped_type"] = *s.unmappedType + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} + +// -- GeoDistanceSort -- + +// GeoDistanceSort allows for sorting by geographic distance. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. +type GeoDistanceSort struct { + Sorter + fieldName string + points []*GeoPoint + geohashes []string + geoDistance *string + unit string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewGeoDistanceSort creates a new sorter for geo distances. +func NewGeoDistanceSort(fieldName string) GeoDistanceSort { + return GeoDistanceSort{ + fieldName: fieldName, + points: make([]*GeoPoint, 0), + geohashes: make([]string, 0), + ascending: true, + } +} + +// FieldName specifies the name of the (geo) field to use for sorting. +func (s GeoDistanceSort) FieldName(fieldName string) GeoDistanceSort { + s.fieldName = fieldName + return s +} + +// Order defines whether sorting ascending (default) or descending. 
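+// For example (an illustrative sketch):
+//
+//	sorter := NewGeoDistanceSort("pin.location").Point(40, -70).Order(false)
+//
+// emits "reverse":true in the generated sort source, as Source below shows.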
+func (s GeoDistanceSort) Order(ascending bool) GeoDistanceSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s GeoDistanceSort) Asc() GeoDistanceSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s GeoDistanceSort) Desc() GeoDistanceSort { + s.ascending = false + return s +} + +// Point specifies a point to create the range distance aggregations from. +func (s GeoDistanceSort) Point(lat, lon float64) GeoDistanceSort { + s.points = append(s.points, GeoPointFromLatLon(lat, lon)) + return s +} + +// Points specifies the geo point(s) to create the range distance aggregations from. +func (s GeoDistanceSort) Points(points ...*GeoPoint) GeoDistanceSort { + s.points = append(s.points, points...) + return s +} + +// GeoHashes specifies the geo point to create the range distance aggregations from. +func (s GeoDistanceSort) GeoHashes(geohashes ...string) GeoDistanceSort { + s.geohashes = append(s.geohashes, geohashes...) + return s +} + +// GeoDistance represents how to compute the distance. +// It can be sloppy_arc (default), arc, or plane. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. +func (s GeoDistanceSort) GeoDistance(geoDistance string) GeoDistanceSort { + s.geoDistance = &geoDistance + return s +} + +// Unit specifies the distance unit to use. It defaults to km. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#distance-units +// for details. +func (s GeoDistanceSort) Unit(unit string) GeoDistanceSort { + s.unit = unit + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min, max, sum, and avg. +func (s GeoDistanceSort) SortMode(sortMode string) GeoDistanceSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s GeoDistanceSort) NestedFilter(nestedFilter Query) GeoDistanceSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s GeoDistanceSort) NestedPath(nestedPath string) GeoDistanceSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s GeoDistanceSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_geo_distance"] = x + + // Points + ptarr := make([]interface{}, 0) + for _, pt := range s.points { + ptarr = append(ptarr, pt.Source()) + } + for _, geohash := range s.geohashes { + ptarr = append(ptarr, geohash) + } + x[s.fieldName] = ptarr + + if s.unit != "" { + x["unit"] = s.unit + } + if s.geoDistance != nil { + x["distance_type"] = *s.geoDistance + } + + if !s.ascending { + x["reverse"] = true + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} + +// -- ScriptSort -- + +// ScriptSort sorts by a custom script. See +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting +// for details about scripting. 
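+//
+// A hedged usage sketch (the script body "doc['votes'].value" is hypothetical):
+//
+//	sorter := NewScriptSort(NewScript("doc['votes'].value"), "number").Desc()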
+type ScriptSort struct { + Sorter + script *Script + typ string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewScriptSort creates and initializes a new ScriptSort. +// You must provide a script and a type, e.g. "string" or "number". +func NewScriptSort(script *Script, typ string) ScriptSort { + return ScriptSort{ + script: script, + typ: typ, + ascending: true, + } +} + +// Type sets the script type, which can be either "string" or "number". +func (s ScriptSort) Type(typ string) ScriptSort { + s.typ = typ + return s +} + +// Order defines whether sorting ascending (default) or descending. +func (s ScriptSort) Order(ascending bool) ScriptSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s ScriptSort) Asc() ScriptSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s ScriptSort) Desc() ScriptSort { + s.ascending = false + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min or max. +func (s ScriptSort) SortMode(sortMode string) ScriptSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s ScriptSort) NestedFilter(nestedFilter Query) ScriptSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s ScriptSort) NestedPath(nestedPath string) ScriptSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s ScriptSort) Source() (interface{}, error) { + if s.script == nil { + return nil, errors.New("ScriptSort expected a script") + } + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_script"] = x + + src, err := s.script.Source() + if err != nil { + return nil, err + } + x["script"] = src + + x["type"] = s.typ + + if !s.ascending { + x["reverse"] = true + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/sort_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/sort_test.go new file mode 100644 index 000000000..a0f9ddfc8 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/sort_test.go @@ -0,0 +1,214 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSortInfo(t *testing.T) { + builder := SortInfo{Field: "grade", Ascending: false} + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"grade":{"order":"desc"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScoreSort(t *testing.T) { + builder := NewScoreSort() + if builder.ascending != false { + t.Error("expected score sorter to be ascending by default") + } + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_score":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScoreSortOrderAscending(t *testing.T) { + builder := NewScoreSort().Asc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_score":{"reverse":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScoreSortOrderDescending(t *testing.T) { + builder := NewScoreSort().Desc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_score":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldSort(t *testing.T) { + builder := NewFieldSort("grade") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"grade":{"order":"asc"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldSortOrderDesc(t *testing.T) { + builder := NewFieldSort("grade").Desc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"grade":{"order":"desc"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldSortComplex(t *testing.T) { + builder := NewFieldSort("price").Desc(). + SortMode("avg"). + Missing("_last"). + UnmappedType("product"). + NestedFilter(NewTermQuery("product.color", "blue")). + NestedPath("variant") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc","unmapped_type":"product"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceSort(t *testing.T) { + builder := NewGeoDistanceSort("pin.location"). + Point(-70, 40). + Order(true). + Unit("km"). + SortMode("min"). 
+ GeoDistance("sloppy_arc") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceSortOrderDesc(t *testing.T) { + builder := NewGeoDistanceSort("pin.location"). + Point(-70, 40). + Unit("km"). + SortMode("min"). + GeoDistance("sloppy_arc"). + Desc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"reverse":true,"unit":"km"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} +func TestScriptSort(t *testing.T) { + builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Order(true) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_script":{"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptSortOrderDesc(t *testing.T) { + builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Desc() + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_script":{"reverse":true,"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggest.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggest.go new file mode 100644 index 000000000..1fb48ac0b --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggest.go @@ -0,0 +1,143 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// SuggestService returns suggestions for text. +type SuggestService struct { + client *Client + pretty bool + routing string + preference string + indices []string + suggesters []Suggester +} + +func NewSuggestService(client *Client) *SuggestService { + builder := &SuggestService{ + client: client, + indices: make([]string, 0), + suggesters: make([]Suggester, 0), + } + return builder +} + +func (s *SuggestService) Index(indices ...string) *SuggestService { + s.indices = append(s.indices, indices...) 
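+	// Index may be called repeatedly; Do joins all collected indices with
+	// commas when building the request path.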
+ return s +} + +func (s *SuggestService) Pretty(pretty bool) *SuggestService { + s.pretty = pretty + return s +} + +func (s *SuggestService) Routing(routing string) *SuggestService { + s.routing = routing + return s +} + +func (s *SuggestService) Preference(preference string) *SuggestService { + s.preference = preference + return s +} + +func (s *SuggestService) Suggester(suggester Suggester) *SuggestService { + s.suggesters = append(s.suggesters, suggester) + return s +} + +func (s *SuggestService) Do() (SuggestResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + path += strings.Join(indexPart, ",") + + // Suggest + path += "/_suggest" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + + // Set body + body := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + body[s.Name()] = src + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // There is a _shard object that cannot be deserialized. + // So we use json.RawMessage instead. + var suggestions map[string]*json.RawMessage + if err := json.Unmarshal(res.Body, &suggestions); err != nil { + return nil, err + } + + ret := make(SuggestResult) + for name, result := range suggestions { + if name != "_shards" { + var s []Suggestion + if err := json.Unmarshal(*result, &s); err != nil { + return nil, err + } + ret[name] = s + } + } + + return ret, nil +} + +type SuggestResult map[string][]Suggestion + +type Suggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []suggestionOption `json:"options"` +} + +type suggestionOption struct { + Text string `json:"text"` + Score float64 `json:"score"` + Freq int `json:"freq"` + Payload interface{} `json:"payload"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field.go new file mode 100644 index 000000000..4738d9910 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field.go @@ -0,0 +1,100 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// SuggestField can be used by the caller to specify a suggest field +// at index time. For a detailed example, see e.g. +// http://www.elasticsearch.org/blog/you-complete-me/. +type SuggestField struct { + inputs []string + output *string + payload interface{} + weight int + contextQueries []SuggesterContextQuery +} + +func NewSuggestField() *SuggestField { + return &SuggestField{weight: -1} +} + +func (f *SuggestField) Input(input ...string) *SuggestField { + if f.inputs == nil { + f.inputs = make([]string, 0) + } + f.inputs = append(f.inputs, input...) 
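+	// MarshalJSON below serializes a single input as a plain string and
+	// several inputs as an array.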
+ return f +} + +func (f *SuggestField) Output(output string) *SuggestField { + f.output = &output + return f +} + +func (f *SuggestField) Payload(payload interface{}) *SuggestField { + f.payload = payload + return f +} + +func (f *SuggestField) Weight(weight int) *SuggestField { + f.weight = weight + return f +} + +func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField { + f.contextQueries = append(f.contextQueries, queries...) + return f +} + +// MarshalJSON encodes SuggestField into JSON. +func (f *SuggestField) MarshalJSON() ([]byte, error) { + source := make(map[string]interface{}) + + if f.inputs != nil { + switch len(f.inputs) { + case 1: + source["input"] = f.inputs[0] + default: + source["input"] = f.inputs + } + } + + if f.output != nil { + source["output"] = *f.output + } + + if f.payload != nil { + source["payload"] = f.payload + } + + if f.weight >= 0 { + source["weight"] = f.weight + } + + switch len(f.contextQueries) { + case 0: + case 1: + src, err := f.contextQueries[0].Source() + if err != nil { + return nil, err + } + source["context"] = src + default: + var ctxq []interface{} + for _, query := range f.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + source["context"] = ctxq + } + + return json.Marshal(source) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field_test.go new file mode 100644 index 000000000..b01cf0af0 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field_test.go @@ -0,0 +1,30 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggestField(t *testing.T) { + field := NewSuggestField(). + Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). + Output("Golang and Elasticsearch: An introduction."). + Weight(1). + ContextQuery( + NewSuggesterCategoryMapping("color").FieldName("color_field").DefaultValues("red", "green", "blue"), + NewSuggesterGeoMapping("location").Precision("5m").Neighbors(true).DefaultLocations(GeoPointFromLatLon(52.516275, 13.377704)), + ) + data, err := json.Marshal(field) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"context":[{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"}},{"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}}],"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"output":"Golang and Elasticsearch: An introduction.","weight":1}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_test.go new file mode 100644 index 000000000..50a4a0952 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_test.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + _ "net/http" + "testing" +) + +func TestSuggestService(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Suggest: NewSuggestField(). + Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). + Output("Golang and Elasticsearch: An introduction."). + Weight(0), + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + Location: "48.1189,11.4289", // lat,lon + Suggest: NewSuggestField(). + Input("Another unrelated topic.", "Golang topic."). + Output("About Golang."). + Weight(1), + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + Location: "47.7167,11.7167", // lat,lon + Suggest: NewSuggestField(). + Input("Cycling is fun."). + Output("Cycling is a fun sport."), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Test _suggest endpoint + termSuggesterName := "my-term-suggester" + termSuggester := NewTermSuggester(termSuggesterName).Text("Goolang").Field("message") + phraseSuggesterName := "my-phrase-suggester" + phraseSuggester := NewPhraseSuggester(phraseSuggesterName).Text("Goolang").Field("message") + completionSuggesterName := "my-completion-suggester" + completionSuggester := NewCompletionSuggester(completionSuggesterName).Text("Go").Field("suggest_field") + + result, err := client.Suggest(). + Index(testIndexName). + Suggester(termSuggester). + Suggester(phraseSuggester). + Suggester(completionSuggester). 
+ Do() + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Errorf("expected result != nil; got nil") + } + if len(result) != 3 { + t.Errorf("expected 3 suggester results; got %d", len(result)) + } + + termSuggestions, found := result[termSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", termSuggesterName) + } + if termSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", termSuggesterName) + } + if len(termSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(termSuggestions)) + } + + phraseSuggestions, found := result[phraseSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", phraseSuggesterName) + } + if phraseSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", phraseSuggesterName) + } + if len(phraseSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(phraseSuggestions)) + } + + completionSuggestions, found := result[completionSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", completionSuggesterName) + } + if completionSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", completionSuggesterName) + } + if len(completionSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(completionSuggestions)) + } + if len(completionSuggestions[0].Options) != 2 { + t.Errorf("expected 2 suggestion options; got %d", len(completionSuggestions[0].Options)) + } + if completionSuggestions[0].Options[0].Text != "About Golang." { + t.Errorf("expected Suggest[%s][0].Options[0].Text == %q; got %q", completionSuggesterName, "About Golang.", completionSuggestions[0].Options[0].Text) + } + if completionSuggestions[0].Options[1].Text != "Golang and Elasticsearch: An introduction." { + t.Errorf("expected Suggest[%s][0].Options[1].Text == %q; got %q", completionSuggesterName, "Golang and Elasticsearch: An introduction.", completionSuggestions[0].Options[1].Text) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester.go new file mode 100644 index 000000000..c342b10d3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester.go @@ -0,0 +1,15 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Represents the generic suggester interface. +// A suggester's only purpose is to return the +// source of the query as a JSON-serializable +// object. Returning a map[string]interface{} +// will do. +type Suggester interface { + Name() string + Source(includeName bool) (interface{}, error) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion.go new file mode 100644 index 000000000..e0f5a3861 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CompletionSuggester is a fast suggester for e.g. type-ahead completion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html +// for more details. 
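+//
+// An illustrative sketch ("suggest_field" is the completion field declared in
+// the test mapping in setup_test.go):
+//
+//	s := NewCompletionSuggester("my-suggester").Text("Go").Field("suggest_field")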
+type CompletionSuggester struct {
+	Suggester
+	name           string
+	text           string
+	field          string
+	analyzer       string
+	size           *int
+	shardSize      *int
+	contextQueries []SuggesterContextQuery
+}
+
+// Creates a new completion suggester.
+func NewCompletionSuggester(name string) *CompletionSuggester {
+	return &CompletionSuggester{
+		name:           name,
+		contextQueries: make([]SuggesterContextQuery, 0),
+	}
+}
+
+func (q *CompletionSuggester) Name() string {
+	return q.name
+}
+
+func (q *CompletionSuggester) Text(text string) *CompletionSuggester {
+	q.text = text
+	return q
+}
+
+func (q *CompletionSuggester) Field(field string) *CompletionSuggester {
+	q.field = field
+	return q
+}
+
+func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester {
+	q.analyzer = analyzer
+	return q
+}
+
+func (q *CompletionSuggester) Size(size int) *CompletionSuggester {
+	q.size = &size
+	return q
+}
+
+func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester {
+	q.shardSize = &shardSize
+	return q
+}
+
+func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester {
+	q.contextQueries = append(q.contextQueries, query)
+	return q
+}
+
+func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester {
+	q.contextQueries = append(q.contextQueries, queries...)
+	return q
+}
+
+// completionSuggesterRequest is necessary because the order in which
+// the JSON elements are routed to Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the completion element.
+type completionSuggesterRequest struct {
+	Text       string      `json:"text"`
+	Completion interface{} `json:"completion"`
+}
+
+// Creates the source for the completion suggester.
+func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
+	cs := &completionSuggesterRequest{}
+
+	if q.text != "" {
+		cs.Text = q.text
+	}
+
+	suggester := make(map[string]interface{})
+	cs.Completion = suggester
+
+	if q.analyzer != "" {
+		suggester["analyzer"] = q.analyzer
+	}
+	if q.field != "" {
+		suggester["field"] = q.field
+	}
+	if q.size != nil {
+		suggester["size"] = *q.size
+	}
+	if q.shardSize != nil {
+		suggester["shard_size"] = *q.shardSize
+	}
+	switch len(q.contextQueries) {
+	case 0:
+	case 1:
+		src, err := q.contextQueries[0].Source()
+		if err != nil {
+			return nil, err
+		}
+		suggester["context"] = src
+	default:
+		ctxq := make([]interface{}, 0)
+		for _, query := range q.contextQueries {
+			src, err := query.Source()
+			if err != nil {
+				return nil, err
+			}
+			ctxq = append(ctxq, src)
+		}
+		suggester["context"] = ctxq
+	}
+
+	// TODO(oe) Add completion-suggester specific parameters here
+
+	if !includeName {
+		return cs, nil
+	}
+
+	source := make(map[string]interface{})
+	source[q.name] = cs
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy.go
new file mode 100644
index 000000000..1c4455a61
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy.go
@@ -0,0 +1,179 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyCompletionSuggester is a CompletionSuggester that allows fuzzy
+// completion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html +// for details, and +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html#fuzzy +// for details about the fuzzy completion suggester. +type FuzzyCompletionSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + fuzziness interface{} + fuzzyTranspositions *bool + fuzzyMinLength *int + fuzzyPrefixLength *int + unicodeAware *bool +} + +// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester. +type Fuzziness struct { +} + +// Creates a new completion suggester. +func NewFuzzyCompletionSuggester(name string) *FuzzyCompletionSuggester { + return &FuzzyCompletionSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *FuzzyCompletionSuggester) Name() string { + return q.name +} + +func (q *FuzzyCompletionSuggester) Text(text string) *FuzzyCompletionSuggester { + q.text = text + return q +} + +func (q *FuzzyCompletionSuggester) Field(field string) *FuzzyCompletionSuggester { + q.field = field + return q +} + +func (q *FuzzyCompletionSuggester) Analyzer(analyzer string) *FuzzyCompletionSuggester { + q.analyzer = analyzer + return q +} + +func (q *FuzzyCompletionSuggester) Size(size int) *FuzzyCompletionSuggester { + q.size = &size + return q +} + +func (q *FuzzyCompletionSuggester) ShardSize(shardSize int) *FuzzyCompletionSuggester { + q.shardSize = &shardSize + return q +} + +func (q *FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +// Fuzziness defines the strategy used to describe what "fuzzy" actually +// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO". +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#fuzziness +// for a detailed description. +func (q *FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) *FuzzyCompletionSuggester { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) *FuzzyCompletionSuggester { + q.fuzzyTranspositions = &fuzzyTranspositions + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyMinLength(minLength int) *FuzzyCompletionSuggester { + q.fuzzyMinLength = &minLength + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) *FuzzyCompletionSuggester { + q.fuzzyPrefixLength = &prefixLength + return q +} + +func (q *FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggester { + q.unicodeAware = &unicodeAware + return q +} + +// Creates the source for the completion suggester. 
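+// The fuzzy options are nested under a "fuzzy" object inside "completion",
+// e.g. {"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}};
+// compare suggester_completion_fuzzy_test.go below.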
+func (q *FuzzyCompletionSuggester) Source(includeName bool) (interface{}, error) { + cs := &completionSuggesterRequest{} + + if q.text != "" { + cs.Text = q.text + } + + suggester := make(map[string]interface{}) + cs.Completion = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, 0) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Fuzzy Completion Suggester fields + fuzzy := make(map[string]interface{}) + suggester["fuzzy"] = fuzzy + if q.fuzziness != nil { + fuzzy["fuzziness"] = q.fuzziness + } + if q.fuzzyTranspositions != nil { + fuzzy["transpositions"] = *q.fuzzyTranspositions + } + if q.fuzzyMinLength != nil { + fuzzy["min_length"] = *q.fuzzyMinLength + } + if q.fuzzyPrefixLength != nil { + fuzzy["prefix_length"] = *q.fuzzyPrefixLength + } + if q.unicodeAware != nil { + fuzzy["unicode_aware"] = *q.unicodeAware + } + + if !includeName { + return cs, nil + } + + source := make(map[string]interface{}) + source[q.name] = cs + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy_test.go new file mode 100644 index 000000000..29fcba55f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy_test.go @@ -0,0 +1,50 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFuzzyCompletionSuggesterSource(t *testing.T) { + s := NewFuzzyCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest"). + Fuzziness(2) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFuzzyCompletionSuggesterWithStringFuzzinessSource(t *testing.T) { + s := NewFuzzyCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest"). 
+ Fuzziness("1..4") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":"1..4"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_test.go new file mode 100644 index 000000000..986d3da01 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCompletionSuggesterSource(t *testing.T) { + s := NewCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context.go new file mode 100644 index 000000000..0903f2171 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context.go @@ -0,0 +1,11 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SuggesterContextQuery is used to define context information within +// a suggestion request. +type SuggesterContextQuery interface { + Source() (interface{}, error) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category.go new file mode 100644 index 000000000..4b8e43f88 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterCategoryMapping -- + +// SuggesterCategoryMapping provides a mapping for a category context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_mapping. +type SuggesterCategoryMapping struct { + name string + fieldName string + defaultValues []string +} + +// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping. +func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping { + return &SuggesterCategoryMapping{ + name: name, + defaultValues: make([]string, 0), + } +} + +func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping { + q.defaultValues = append(q.defaultValues, values...) 
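+	// Source below emits a single default value as a plain string and
+	// several values as an array.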
+ return q +} + +func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterCategoryMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "category" + + switch len(q.defaultValues) { + case 0: + x["default"] = q.defaultValues + case 1: + x["default"] = q.defaultValues[0] + default: + x["default"] = q.defaultValues + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterCategoryQuery -- + +// SuggesterCategoryQuery provides querying a category context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_query. +type SuggesterCategoryQuery struct { + name string + values []string +} + +// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery. +func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery { + q := &SuggesterCategoryQuery{ + name: name, + values: make([]string, 0), + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery { + q.values = append(q.values, values...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterCategoryQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + switch len(q.values) { + case 0: + source[q.name] = q.values + case 1: + source[q.name] = q.values[0] + default: + source[q.name] = q.values + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category_test.go new file mode 100644 index 000000000..7ca045801 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category_test.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggesterCategoryMapping(t *testing.T) { + q := NewSuggesterCategoryMapping("color").DefaultValues("red") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":"red","type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryMappingWithTwoDefaultValues(t *testing.T) { + q := NewSuggesterCategoryMapping("color").DefaultValues("red", "orange") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":["red","orange"],"type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryMappingWithFieldName(t *testing.T) { + q := NewSuggesterCategoryMapping("color"). + DefaultValues("red", "orange"). 
+ FieldName("color_field") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":["red","orange"],"path":"color_field","type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryQuery(t *testing.T) { + q := NewSuggesterCategoryQuery("color", "red") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":"red"}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryQueryWithTwoValues(t *testing.T) { + q := NewSuggesterCategoryQuery("color", "red", "yellow") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":["red","yellow"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo.go new file mode 100644 index 000000000..bde1a4067 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo.go @@ -0,0 +1,132 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterGeoMapping -- + +// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_mapping. +type SuggesterGeoMapping struct { + name string + defaultLocations []*GeoPoint + precision []string + neighbors *bool + fieldName string +} + +// NewSuggesterGeoMapping creates a new SuggesterGeoMapping. +func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping { + return &SuggesterGeoMapping{ + name: name, + defaultLocations: make([]*GeoPoint, 0), + precision: make([]string, 0), + } +} + +func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping { + q.defaultLocations = append(q.defaultLocations, locations...) + return q +} + +func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping { + q.precision = append(q.precision, precision...) + return q +} + +func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping { + q.neighbors = &neighbors + return q +} + +func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. 
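+// For example (a sketch): NewSuggesterGeoMapping("location").Precision("1km").Neighbors(true)
+// serializes to {"location":{"neighbors":true,"precision":["1km"],"type":"geo"}}.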
+func (q *SuggesterGeoMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "geo" + + if len(q.precision) > 0 { + x["precision"] = q.precision + } + if q.neighbors != nil { + x["neighbors"] = *q.neighbors + } + + switch len(q.defaultLocations) { + case 0: + case 1: + x["default"] = q.defaultLocations[0].Source() + default: + arr := make([]interface{}, 0) + for _, p := range q.defaultLocations { + arr = append(arr, p.Source()) + } + x["default"] = arr + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterGeoQuery -- + +// SuggesterGeoQuery provides querying a geolocation context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_query +type SuggesterGeoQuery struct { + name string + location *GeoPoint + precision []string +} + +// NewSuggesterGeoQuery creates a new SuggesterGeoQuery. +func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery { + return &SuggesterGeoQuery{ + name: name, + location: location, + precision: make([]string, 0), + } +} + +func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery { + q.precision = append(q.precision, precision...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterGeoQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if len(q.precision) == 0 { + if q.location != nil { + source[q.name] = q.location.Source() + } + } else { + x := make(map[string]interface{}) + source[q.name] = x + + if q.location != nil { + x["value"] = q.location.Source() + } + + switch len(q.precision) { + case 0: + case 1: + x["precision"] = q.precision[0] + default: + x["precision"] = q.precision + } + } + + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo_test.go new file mode 100644 index 000000000..331276dab --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo_test.go @@ -0,0 +1,48 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggesterGeoMapping(t *testing.T) { + q := NewSuggesterGeoMapping("location"). + Precision("1km", "5m"). + Neighbors(true). + FieldName("pin"). 
+ DefaultLocations(GeoPointFromLatLon(0.0, 0.0)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterGeoQuery(t *testing.T) { + q := NewSuggesterGeoQuery("location", GeoPointFromLatLon(11.5, 62.71)).Precision("1km") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"location":{"precision":"1km","value":{"lat":11.5,"lon":62.71}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase.go new file mode 100644 index 000000000..60c48d88b --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase.go @@ -0,0 +1,554 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// For more details, see +// http://www.elasticsearch.org/guide/reference/api/search/phrase-suggest/ +type PhraseSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to a phrase suggester + maxErrors *float64 + separator *string + realWordErrorLikelihood *float64 + confidence *float64 + generators map[string][]CandidateGenerator + gramSize *int + smoothingModel SmoothingModel + forceUnigrams *bool + tokenLimit *int + preTag, postTag *string + collateQuery *string + collateFilter *string + collatePreference *string + collateParams map[string]interface{} + collatePrune *bool +} + +// Creates a new phrase suggester. +func NewPhraseSuggester(name string) *PhraseSuggester { + return &PhraseSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + collateParams: make(map[string]interface{}), + } +} + +func (q *PhraseSuggester) Name() string { + return q.name +} + +func (q *PhraseSuggester) Text(text string) *PhraseSuggester { + q.text = text + return q +} + +func (q *PhraseSuggester) Field(field string) *PhraseSuggester { + q.field = field + return q +} + +func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester { + q.analyzer = analyzer + return q +} + +func (q *PhraseSuggester) Size(size int) *PhraseSuggester { + q.size = &size + return q +} + +func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester { + q.shardSize = &shardSize + return q +} + +func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, queries...) 
+ return q
+}
+
+func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester {
+ if gramSize >= 1 {
+ q.gramSize = &gramSize
+ }
+ return q
+}
+
+func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester {
+ q.maxErrors = &maxErrors
+ return q
+}
+
+func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester {
+ q.separator = &separator
+ return q
+}
+
+func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester {
+ q.realWordErrorLikelihood = &realWordErrorLikelihood
+ return q
+}
+
+func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester {
+ q.confidence = &confidence
+ return q
+}
+
+func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester {
+ if q.generators == nil {
+ q.generators = make(map[string][]CandidateGenerator)
+ }
+ typ := generator.Type()
+ if _, found := q.generators[typ]; !found {
+ q.generators[typ] = make([]CandidateGenerator, 0)
+ }
+ q.generators[typ] = append(q.generators[typ], generator)
+ return q
+}
+
+func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester {
+ for _, g := range generators {
+ q = q.CandidateGenerator(g)
+ }
+ return q
+}
+
+func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester {
+ q.generators = nil
+ return q
+}
+
+func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester {
+ q.forceUnigrams = &forceUnigrams
+ return q
+}
+
+func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester {
+ q.smoothingModel = smoothingModel
+ return q
+}
+
+func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester {
+ q.tokenLimit = &tokenLimit
+ return q
+}
+
+func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester {
+ q.preTag = &preTag
+ q.postTag = &postTag
+ return q
+}
+
+func (q *PhraseSuggester) CollateQuery(collateQuery string) *PhraseSuggester {
+ q.collateQuery = &collateQuery
+ return q
+}
+
+func (q *PhraseSuggester) CollateFilter(collateFilter string) *PhraseSuggester {
+ q.collateFilter = &collateFilter
+ return q
+}
+
+func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester {
+ q.collatePreference = &collatePreference
+ return q
+}
+
+func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester {
+ q.collateParams = collateParams
+ return q
+}
+
+func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester {
+ q.collatePrune = &collatePrune
+ return q
+}
+
+// phraseSuggesterRequest is necessary because the order in which
+// the JSON elements are serialized is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the phrase element.
+type phraseSuggesterRequest struct {
+ Text string `json:"text"`
+ Phrase interface{} `json:"phrase"`
+}
+
+// Creates the source for the phrase suggester.
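+//
+// As a minimal sketch (derived from the tests below), a suggester built via
+//
+//	NewPhraseSuggester("name").Text("Xor the Got-Jewel").Field("bigram").Size(1)
+//
+// produces, with includeName == true, JSON of the shape:
+//
+//	{"name":{"text":"Xor the Got-Jewel","phrase":{"field":"bigram","size":1}}}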
+func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) {
+ ps := &phraseSuggesterRequest{}
+
+ if q.text != "" {
+ ps.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ ps.Phrase = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ src, err := q.contextQueries[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["context"] = src
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ src, err := query.Source()
+ if err != nil {
+ return nil, err
+ }
+ ctxq = append(ctxq, src)
+ }
+ suggester["context"] = ctxq
+ }
+
+ // Phrase-specific parameters
+ if q.realWordErrorLikelihood != nil {
+ suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood
+ }
+ if q.confidence != nil {
+ suggester["confidence"] = *q.confidence
+ }
+ if q.separator != nil {
+ suggester["separator"] = *q.separator
+ }
+ if q.maxErrors != nil {
+ suggester["max_errors"] = *q.maxErrors
+ }
+ if q.gramSize != nil {
+ suggester["gram_size"] = *q.gramSize
+ }
+ if q.forceUnigrams != nil {
+ suggester["force_unigrams"] = *q.forceUnigrams
+ }
+ if q.tokenLimit != nil {
+ suggester["token_limit"] = *q.tokenLimit
+ }
+ if len(q.generators) > 0 {
+ for typ, generators := range q.generators {
+ arr := make([]interface{}, 0)
+ for _, g := range generators {
+ src, err := g.Source()
+ if err != nil {
+ return nil, err
+ }
+ arr = append(arr, src)
+ }
+ suggester[typ] = arr
+ }
+ }
+ if q.smoothingModel != nil {
+ src, err := q.smoothingModel.Source()
+ if err != nil {
+ return nil, err
+ }
+ x := make(map[string]interface{})
+ x[q.smoothingModel.Type()] = src
+ suggester["smoothing"] = x
+ }
+ if q.preTag != nil {
+ hl := make(map[string]string)
+ hl["pre_tag"] = *q.preTag
+ if q.postTag != nil {
+ hl["post_tag"] = *q.postTag
+ }
+ suggester["highlight"] = hl
+ }
+ if q.collateQuery != nil || q.collateFilter != nil {
+ collate := make(map[string]interface{})
+ suggester["collate"] = collate
+ if q.collateQuery != nil {
+ collate["query"] = *q.collateQuery
+ }
+ if q.collateFilter != nil {
+ collate["filter"] = *q.collateFilter
+ }
+ if q.collatePreference != nil {
+ collate["preference"] = *q.collatePreference
+ }
+ if len(q.collateParams) > 0 {
+ collate["params"] = q.collateParams
+ }
+ if q.collatePrune != nil {
+ collate["prune"] = *q.collatePrune
+ }
+ }
+
+ if !includeName {
+ return ps, nil
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = ps
+ return source, nil
+}
+
+// -- Smoothing models --
+
+type SmoothingModel interface {
+ Type() string
+ Source() (interface{}, error)
+}
+
+// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
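+//
+// Note that Source emits only the model parameters, e.g. {"discount":0.42}
+// (see TestPhraseStupidBackoffSmoothingModel below); PhraseSuggester.Source
+// wraps that under the model's Type(), yielding
+// "smoothing":{"stupid_backoff":{"discount":0.42}} in the final request.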
+type StupidBackoffSmoothingModel struct {
+ discount float64
+}
+
+func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel {
+ return &StupidBackoffSmoothingModel{
+ discount: discount,
+ }
+}
+
+func (sm *StupidBackoffSmoothingModel) Type() string {
+ return "stupid_backoff"
+}
+
+func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["discount"] = sm.discount
+ return source, nil
+}
+
+// --
+
+// LaplaceSmoothingModel implements a Laplace smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LaplaceSmoothingModel struct {
+ alpha float64
+}
+
+func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel {
+ return &LaplaceSmoothingModel{
+ alpha: alpha,
+ }
+}
+
+func (sm *LaplaceSmoothingModel) Type() string {
+ return "laplace"
+}
+
+func (sm *LaplaceSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["alpha"] = sm.alpha
+ return source, nil
+}
+
+// --
+
+// LinearInterpolationSmoothingModel implements a linear interpolation
+// smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LinearInterpolationSmoothingModel struct {
+ trigramLambda float64
+ bigramLambda float64
+ unigramLambda float64
+}
+
+func NewLinearInterpolationSmoothingModel(trigramLambda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel {
+ return &LinearInterpolationSmoothingModel{
+ trigramLambda: trigramLambda,
+ bigramLambda: bigramLambda,
+ unigramLambda: unigramLambda,
+ }
+}
+
+func (sm *LinearInterpolationSmoothingModel) Type() string {
+ return "linear_interpolation"
+}
+
+func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["trigram_lambda"] = sm.trigramLambda
+ source["bigram_lambda"] = sm.bigramLambda
+ source["unigram_lambda"] = sm.unigramLambda
+ return source, nil
+}
+
+// -- CandidateGenerator --
+
+type CandidateGenerator interface {
+ Type() string
+ Source() (interface{}, error)
+}
+
+// DirectCandidateGenerator implements a direct candidate generator.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html
+// for details about candidate generators.
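+//
+// As a sketch (taken from TestPhraseSuggesterComplexSource below), a
+// generator built via
+//
+//	NewDirectCandidateGenerator("body").SuggestMode("always").MinWordLength(1)
+//
+// serializes to:
+//
+//	{"field":"body","min_word_length":1,"suggest_mode":"always"}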
+type DirectCandidateGenerator struct { + field string + preFilter *string + postFilter *string + suggestMode *string + accuracy *float64 + size *int + sort *string + stringDistance *string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator { + return &DirectCandidateGenerator{ + field: field, + } +} + +func (g *DirectCandidateGenerator) Type() string { + return "direct_generator" +} + +func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator { + g.field = field + return g +} + +func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator { + g.preFilter = &preFilter + return g +} + +func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator { + g.postFilter = &postFilter + return g +} + +func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator { + g.suggestMode = &suggestMode + return g +} + +func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator { + g.accuracy = &accuracy + return g +} + +func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator { + g.size = &size + return g +} + +func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator { + g.sort = &sort + return g +} + +func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator { + g.stringDistance = &stringDistance + return g +} + +func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator { + g.maxEdits = &maxEdits + return g +} + +func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator { + g.maxInspections = &maxInspections + return g +} + +func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator { + g.maxTermFreq = &maxTermFreq + return g +} + +func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator { + g.prefixLength = &prefixLength + return g +} + +func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator { + g.minWordLength = &minWordLength + return g +} + +func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator { + g.minDocFreq = &minDocFreq + return g +} + +func (g *DirectCandidateGenerator) Source() (interface{}, error) { + source := make(map[string]interface{}) + if g.field != "" { + source["field"] = g.field + } + if g.suggestMode != nil { + source["suggest_mode"] = *g.suggestMode + } + if g.accuracy != nil { + source["accuracy"] = *g.accuracy + } + if g.size != nil { + source["size"] = *g.size + } + if g.sort != nil { + source["sort"] = *g.sort + } + if g.stringDistance != nil { + source["string_distance"] = *g.stringDistance + } + if g.maxEdits != nil { + source["max_edits"] = *g.maxEdits + } + if g.maxInspections != nil { + source["max_inspections"] = *g.maxInspections + } + if g.maxTermFreq != nil { + source["max_term_freq"] = *g.maxTermFreq + } + if g.prefixLength != nil { + source["prefix_length"] = *g.prefixLength + } + if g.minWordLength != nil { + source["min_word_length"] = *g.minWordLength + } + if g.minDocFreq != nil { + source["min_doc_freq"] = *g.minDocFreq + } + if g.preFilter != nil { + source["pre_filter"] = *g.preFilter + } + if g.postFilter != nil { + source["post_filter"] = *g.postFilter + } + return source, nil +} diff 
--git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase_test.go new file mode 100644 index 000000000..1eb46ce44 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase_test.go @@ -0,0 +1,169 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPhraseSuggesterSource(t *testing.T) { + s := NewPhraseSuggester("name"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(1). + RealWordErrorLikelihood(0.95). + MaxErrors(0.5). + GramSize(2). + Highlight("", "") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseSuggesterSourceWithContextQuery(t *testing.T) { + geomapQ := NewSuggesterGeoMapping("location"). + Precision("1km", "5m"). + Neighbors(true). + FieldName("pin"). + DefaultLocations(GeoPointFromLatLon(0.0, 0.0)) + + s := NewPhraseSuggester("name"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(1). + RealWordErrorLikelihood(0.95). + MaxErrors(0.5). + GramSize(2). + Highlight("", ""). + ContextQuery(geomapQ) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","context":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseSuggesterComplexSource(t *testing.T) { + g1 := NewDirectCandidateGenerator("body"). + SuggestMode("always"). + MinWordLength(1) + + g2 := NewDirectCandidateGenerator("reverse"). + SuggestMode("always"). + MinWordLength(1). + PreFilter("reverse"). + PostFilter("reverse") + + s := NewPhraseSuggester("simple_phrase"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(4). + RealWordErrorLikelihood(0.95). + Confidence(2.0). + GramSize(2). + CandidateGenerators(g1, g2). + CollateQuery(`"match":{"{{field_name}}" : "{{suggestion}}"}`). + CollateParams(map[string]interface{}{"field_name": "title"}). + CollatePreference("_primary"). 
+ CollatePrune(true) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"simple_phrase":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","collate":{"params":{"field_name":"title"},"preference":"_primary","prune":true,"query":"\"match\":{\"{{field_name}}\" : \"{{suggestion}}\"}"},"confidence":2,"direct_generator":[{"field":"body","min_word_length":1,"suggest_mode":"always"},{"field":"reverse","min_word_length":1,"post_filter":"reverse","pre_filter":"reverse","suggest_mode":"always"}],"field":"bigram","gram_size":2,"real_word_error_likelihood":0.95,"size":4}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseStupidBackoffSmoothingModel(t *testing.T) { + s := NewStupidBackoffSmoothingModel(0.42) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"discount":0.42}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "stupid_backoff" { + t.Errorf("expected %q, got: %q", "stupid_backoff", s.Type()) + } +} + +func TestPhraseLaplaceSmoothingModel(t *testing.T) { + s := NewLaplaceSmoothingModel(0.63) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"alpha":0.63}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "laplace" { + t.Errorf("expected %q, got: %q", "laplace", s.Type()) + } +} + +func TestLinearInterpolationSmoothingModel(t *testing.T) { + s := NewLinearInterpolationSmoothingModel(0.3, 0.2, 0.05) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"bigram_lambda":0.2,"trigram_lambda":0.3,"unigram_lambda":0.05}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "linear_interpolation" { + t.Errorf("expected %q, got: %q", "linear_interpolation", s.Type()) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term.go new file mode 100644 index 000000000..116af405a --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term.go @@ -0,0 +1,233 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// For more details, see +// http://www.elasticsearch.org/guide/reference/api/search/term-suggest/ +type TermSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to term suggester + suggestMode string + accuracy *float64 + sort string + stringDistance string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +// Creates a new term suggester. +func NewTermSuggester(name string) *TermSuggester { + return &TermSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *TermSuggester) Name() string { + return q.name +} + +func (q *TermSuggester) Text(text string) *TermSuggester { + q.text = text + return q +} + +func (q *TermSuggester) Field(field string) *TermSuggester { + q.field = field + return q +} + +func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester { + q.analyzer = analyzer + return q +} + +func (q *TermSuggester) Size(size int) *TermSuggester { + q.size = &size + return q +} + +func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester { + q.shardSize = &shardSize + return q +} + +func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester { + q.suggestMode = suggestMode + return q +} + +func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester { + q.accuracy = &accuracy + return q +} + +func (q *TermSuggester) Sort(sort string) *TermSuggester { + q.sort = sort + return q +} + +func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester { + q.stringDistance = stringDistance + return q +} + +func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester { + q.maxEdits = &maxEdits + return q +} + +func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester { + q.maxInspections = &maxInspections + return q +} + +func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester { + q.maxTermFreq = &maxTermFreq + return q +} + +func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester { + q.prefixLength = &prefixLength + return q +} + +func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester { + q.minWordLength = &minWordLength + return q +} + +func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester { + q.minDocFreq = &minDocFreq + return q +} + +// termSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the term element. +type termSuggesterRequest struct { + Text string `json:"text"` + Term interface{} `json:"term"` +} + +// Creates the source for the term suggester. 
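+//
+// As a minimal sketch (mirroring TestTermSuggesterSource below), a suggester
+// built via
+//
+//	NewTermSuggester("name").Text("n").Field("suggest")
+//
+// produces, with includeName == true:
+//
+//	{"name":{"text":"n","term":{"field":"suggest"}}}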
+func (q *TermSuggester) Source(includeName bool) (interface{}, error) { + // "suggest" : { + // "my-suggest-1" : { + // "text" : "the amsterdma meetpu", + // "term" : { + // "field" : "body" + // } + // }, + // "my-suggest-2" : { + // "text" : "the rottredam meetpu", + // "term" : { + // "field" : "title", + // } + // } + // } + ts := &termSuggesterRequest{} + if q.text != "" { + ts.Text = q.text + } + + suggester := make(map[string]interface{}) + ts.Term = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, 0) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Specific to term suggester + if q.suggestMode != "" { + suggester["suggest_mode"] = q.suggestMode + } + if q.accuracy != nil { + suggester["accuracy"] = *q.accuracy + } + if q.sort != "" { + suggester["sort"] = q.sort + } + if q.stringDistance != "" { + suggester["string_distance"] = q.stringDistance + } + if q.maxEdits != nil { + suggester["max_edits"] = *q.maxEdits + } + if q.maxInspections != nil { + suggester["max_inspections"] = *q.maxInspections + } + if q.maxTermFreq != nil { + suggester["max_term_freq"] = *q.maxTermFreq + } + if q.prefixLength != nil { + suggester["prefix_len"] = *q.prefixLength + } + if q.minWordLength != nil { + suggester["min_word_len"] = *q.minWordLength + } + if q.minDocFreq != nil { + suggester["min_doc_freq"] = *q.minDocFreq + } + + if !includeName { + return ts, nil + } + + source := make(map[string]interface{}) + source[q.name] = ts + return source, nil +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term_test.go new file mode 100644 index 000000000..869049890 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermSuggesterSource(t *testing.T) { + s := NewTermSuggester("name"). + Text("n"). + Field("suggest") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"n","term":{"field":"suggest"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/termvectors.go b/services/templeton/vendor/src/github.com/olivere/elastic/termvectors.go new file mode 100644 index 000000000..355108200 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/termvectors.go @@ -0,0 +1,458 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// TermvectorsService returns information and statistics on terms in the
+// fields of a particular document. The document could be stored in the
+// index or artificially provided by the user.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html
+// for documentation.
+type TermvectorsService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ dfs *bool
+ doc interface{}
+ fieldStatistics *bool
+ fields []string
+ filter *TermvectorsFilterSettings
+ perFieldAnalyzer map[string]string
+ offsets *bool
+ parent string
+ payloads *bool
+ positions *bool
+ preference string
+ realtime *bool
+ routing string
+ termStatistics *bool
+ version interface{}
+ versionType string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewTermvectorsService creates a new TermvectorsService.
+func NewTermvectorsService(client *Client) *TermvectorsService {
+ return &TermvectorsService{
+ client: client,
+ }
+}
+
+// Index in which the document resides.
+func (s *TermvectorsService) Index(index string) *TermvectorsService {
+ s.index = index
+ return s
+}
+
+// Type of the document.
+func (s *TermvectorsService) Type(typ string) *TermvectorsService {
+ s.typ = typ
+ return s
+}
+
+// Id of the document.
+func (s *TermvectorsService) Id(id string) *TermvectorsService {
+ s.id = id
+ return s
+}
+
+// Dfs specifies if distributed frequencies should be returned instead of
+// shard frequencies.
+func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService {
+ s.dfs = &dfs
+ return s
+}
+
+// Doc is the document to analyze.
+func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService {
+ s.doc = doc
+ return s
+}
+
+// FieldStatistics specifies if document count, sum of document frequencies
+// and sum of total term frequencies should be returned.
+func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService {
+ s.fieldStatistics = &fieldStatistics
+ return s
+}
+
+// Fields specifies a list of fields to return.
+func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService {
+ if s.fields == nil {
+ s.fields = make([]string, 0)
+ }
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// Filter adds terms filter settings.
+func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService {
+ s.filter = filter
+ return s
+}
+
+// PerFieldAnalyzer allows specifying a different analyzer than the one
+// defined for the field.
+func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService {
+ s.perFieldAnalyzer = perFieldAnalyzer
+ return s
+}
+
+// Offsets specifies if term offsets should be returned.
+func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService {
+ s.offsets = &offsets
+ return s
+}
+
+// Parent sets the parent id of the documents.
+func (s *TermvectorsService) Parent(parent string) *TermvectorsService {
+ s.parent = parent
+ return s
+}
+
+// Payloads specifies if term payloads should be returned.
+func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService {
+ s.payloads = &payloads
+ return s
+}
+
+// Positions specifies if term positions should be returned.
+func (s *TermvectorsService) Positions(positions bool) *TermvectorsService {
+ s.positions = &positions
+ return s
+}
+
+// Preference specifies the node or shard the operation
+// should be performed on (default: random).
+func (s *TermvectorsService) Preference(preference string) *TermvectorsService {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies if the request is real-time as opposed to
+// near-real-time (default: true).
+func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService {
+ s.realtime = &realtime
+ return s
+}
+
+// Routing is a specific routing value.
+func (s *TermvectorsService) Routing(routing string) *TermvectorsService {
+ s.routing = routing
+ return s
+}
+
+// TermStatistics specifies if total term frequency and document frequency
+// should be returned.
+func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService {
+ s.termStatistics = &termStatistics
+ return s
+}
+
+// Version sets an explicit version number for concurrency control.
+func (s *TermvectorsService) Version(version interface{}) *TermvectorsService {
+ s.version = version
+ return s
+}
+
+// VersionType specifies a version type ("internal", "external", "external_gte", or "force").
+func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService {
+ s.versionType = versionType
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson defines the body parameters. See documentation.
+func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString defines the body parameters as a string. See documentation.
+func (s *TermvectorsService) BodyString(body string) *TermvectorsService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
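+//
+// The resulting path is /{index}/{type}/_termvectors, or
+// /{index}/{type}/{id}/_termvectors when an id is set, e.g.
+// /twitter/tweet/1/_termvectors (see TestTermVectorsBuildURL below).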
+func (s *TermvectorsService) buildURL() (string, url.Values, error) {
+ var pathParam = map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ }
+ var path string
+ var err error
+
+ // Build URL
+ if s.id != "" {
+ pathParam["id"] = s.id
+ path, err = uritemplates.Expand("/{index}/{type}/{id}/_termvectors", pathParam)
+ } else {
+ path, err = uritemplates.Expand("/{index}/{type}/_termvectors", pathParam)
+ }
+
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.dfs != nil {
+ params.Set("dfs", fmt.Sprintf("%v", *s.dfs))
+ }
+ if s.fieldStatistics != nil {
+ params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics))
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.offsets != nil {
+ params.Set("offsets", fmt.Sprintf("%v", *s.offsets))
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.payloads != nil {
+ params.Set("payloads", fmt.Sprintf("%v", *s.payloads))
+ }
+ if s.positions != nil {
+ params.Set("positions", fmt.Sprintf("%v", *s.positions))
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.realtime != nil {
+ params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.termStatistics != nil {
+ params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *TermvectorsService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *TermvectorsService) Do() (*TermvectorsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else if s.bodyString != "" {
+ body = s.bodyString
+ } else {
+ data := make(map[string]interface{})
+ if s.doc != nil {
+ data["doc"] = s.doc
+ }
+ if len(s.perFieldAnalyzer) > 0 {
+ data["per_field_analyzer"] = s.perFieldAnalyzer
+ }
+ if s.filter != nil {
+ src, err := s.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ data["filter"] = src
+ }
+ if len(data) > 0 {
+ body = data
+ }
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(TermvectorsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Filter settings --
+
+// TermvectorsFilterSettings adds additional filters to a Termvectors request.
+// It allows filtering terms based on their tf-idf scores.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html#_terms_filtering
+// for more information.
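+//
+// A minimal usage sketch (as in TestTermVectorsWithFilter below), where
+// index and doc stand in for the caller's own values:
+//
+//	client.TermVectors(index, "tweet").
+//		Doc(doc).
+//		Filter(NewTermvectorsFilterSettings().MinTermFreq(1)).
+//		Do()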
+type TermvectorsFilterSettings struct {
+ maxNumTerms *int64
+ minTermFreq *int64
+ maxTermFreq *int64
+ minDocFreq *int64
+ maxDocFreq *int64
+ minWordLength *int64
+ maxWordLength *int64
+}
+
+// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct.
+func NewTermvectorsFilterSettings() *TermvectorsFilterSettings {
+ return &TermvectorsFilterSettings{}
+}
+
+// MaxNumTerms specifies the maximum number of terms that must be returned per field.
+func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings {
+ fs.maxNumTerms = &value
+ return fs
+}
+
+// MinTermFreq ignores words with less than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings {
+ fs.minTermFreq = &value
+ return fs
+}
+
+// MaxTermFreq ignores words with more than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings {
+ fs.maxTermFreq = &value
+ return fs
+}
+
+// MinDocFreq ignores terms which do not occur in at least this many docs.
+func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings {
+ fs.minDocFreq = &value
+ return fs
+}
+
+// MaxDocFreq ignores terms which occur in more than this many docs.
+func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings {
+ fs.maxDocFreq = &value
+ return fs
+}
+
+// MinWordLength specifies the minimum word length below which words will be ignored.
+func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings {
+ fs.minWordLength = &value
+ return fs
+}
+
+// MaxWordLength specifies the maximum word length above which words will be ignored.
+func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings {
+ fs.maxWordLength = &value
+ return fs
+}
+
+// Source returns JSON for the query.
+func (fs *TermvectorsFilterSettings) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if fs.maxNumTerms != nil {
+ source["max_num_terms"] = *fs.maxNumTerms
+ }
+ if fs.minTermFreq != nil {
+ source["min_term_freq"] = *fs.minTermFreq
+ }
+ if fs.maxTermFreq != nil {
+ source["max_term_freq"] = *fs.maxTermFreq
+ }
+ if fs.minDocFreq != nil {
+ source["min_doc_freq"] = *fs.minDocFreq
+ }
+ if fs.maxDocFreq != nil {
+ source["max_doc_freq"] = *fs.maxDocFreq
+ }
+ if fs.minWordLength != nil {
+ source["min_word_length"] = *fs.minWordLength
+ }
+ if fs.maxWordLength != nil {
+ source["max_word_length"] = *fs.maxWordLength
+ }
+ return source, nil
+}
+
+// -- Response types --
+
+type TokenInfo struct {
+ StartOffset int64 `json:"start_offset"`
+ EndOffset int64 `json:"end_offset"`
+ Position int64 `json:"position"`
+ Payload string `json:"payload"`
+}
+
+type TermsInfo struct {
+ DocFreq int64 `json:"doc_freq"`
+ TermFreq int64 `json:"term_freq"`
+ Ttf int64 `json:"ttf"`
+ Tokens []TokenInfo `json:"tokens"`
+}
+
+type FieldStatistics struct {
+ DocCount int64 `json:"doc_count"`
+ SumDocFreq int64 `json:"sum_doc_freq"`
+ SumTtf int64 `json:"sum_ttf"`
+}
+
+type TermVectorsFieldInfo struct {
+ FieldStatistics FieldStatistics `json:"field_statistics"`
+ Terms map[string]TermsInfo `json:"terms"`
+}
+
+// TermvectorsResponse is the response of TermvectorsService.Do.
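+//
+// Callers typically check Found and then inspect the per-field statistics
+// in TermVectors; see TestTermVectorsWithId below for a sketch.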
+type TermvectorsResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id,omitempty"` + Version int `json:"_version"` + Found bool `json:"found"` + Took int64 `json:"took"` + TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/termvectors_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/termvectors_test.go new file mode 100644 index 000000000..e487a24a4 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/termvectors_test.go @@ -0,0 +1,165 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + "time" +) + +func TestTermVectorsBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Index string + Type string + Id string + Expected string + }{ + { + "twitter", + "tweet", + "", + "/twitter/tweet/_termvectors", + }, + { + "twitter", + "tweet", + "1", + "/twitter/tweet/1/_termvectors", + }, + } + + for _, test := range tests { + builder := client.TermVectors(test.Index, test.Type) + if test.Id != "" { + builder = builder.Id(test.Id) + } + path, _, err := builder.buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestTermVectorsWithId(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Refresh(true). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // TermVectors by specifying ID + field := "Message" + result, err := client.TermVectors(testIndexName, "tweet"). + Id("1"). + Fields(field). + FieldStatistics(true). + TermStatistics(true). + Do() + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } + if result.Took <= 0 { + t.Errorf("expected took in millis > 0; got: %v", result.Took) + } +} + +func TestTermVectorsWithDoc(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Travis lags sometimes + if isTravis() { + time.Sleep(2 * time.Second) + } + + // TermVectors by specifying Doc + var doc = map[string]interface{}{ + "fullname": "John Doe", + "text": "twitter test test test", + } + var perFieldAnalyzer = map[string]string{ + "fullname": "keyword", + } + + result, err := client.TermVectors(testIndexName, "tweet"). + Doc(doc). + PerFieldAnalyzer(perFieldAnalyzer). + FieldStatistics(true). + TermStatistics(true). 
+ Do() + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } + if result.Took <= 0 { + t.Errorf("expected took in millis > 0; got: %v", result.Took) + } +} + +func TestTermVectorsWithFilter(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Travis lags sometimes + if isTravis() { + time.Sleep(2 * time.Second) + } + + // TermVectors by specifying Doc + var doc = map[string]interface{}{ + "fullname": "John Doe", + "text": "twitter test test test", + } + var perFieldAnalyzer = map[string]string{ + "fullname": "keyword", + } + + result, err := client.TermVectors(testIndexName, "tweet"). + Doc(doc). + PerFieldAnalyzer(perFieldAnalyzer). + FieldStatistics(true). + TermStatistics(true). + Filter(NewTermvectorsFilterSettings().MinTermFreq(1)). + Do() + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } + if result.Took <= 0 { + t.Errorf("expected took in millis > 0; got: %v", result.Took) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/update.go b/services/templeton/vendor/src/github.com/olivere/elastic/update.go new file mode 100644 index 000000000..a20149b1c --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/update.go @@ -0,0 +1,300 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// UpdateService updates a document in Elasticsearch. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-update.html +// for details. +type UpdateService struct { + client *Client + index string + typ string + id string + routing string + parent string + script *Script + fields []string + version *int64 + versionType string + retryOnConflict *int + refresh *bool + replicationType string + consistencyLevel string + upsert interface{} + scriptedUpsert *bool + docAsUpsert *bool + detectNoop *bool + doc interface{} + timeout string + pretty bool +} + +// NewUpdateService creates the service to update documents in Elasticsearch. +func NewUpdateService(client *Client) *UpdateService { + builder := &UpdateService{ + client: client, + fields: make([]string, 0), + } + return builder +} + +// Index is the name of the Elasticsearch index (required). +func (b *UpdateService) Index(name string) *UpdateService { + b.index = name + return b +} + +// Type is the type of the document (required). +func (b *UpdateService) Type(typ string) *UpdateService { + b.typ = typ + return b +} + +// Id is the identifier of the document to update (required). +func (b *UpdateService) Id(id string) *UpdateService { + b.id = id + return b +} + +// Routing specifies a specific routing value. +func (b *UpdateService) Routing(routing string) *UpdateService { + b.routing = routing + return b +} + +// Parent sets the id of the parent document. +func (b *UpdateService) Parent(parent string) *UpdateService { + b.parent = parent + return b +} + +// Script is the script definition. 
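+//
+// A sketch (taken from TestUpdateViaScript below):
+//
+//	update := client.Update().Index("test").Type("type1").Id("1").
+//		Script(NewScript("ctx._source.tags += tag").
+//			Params(map[string]interface{}{"tag": "blue"}).
+//			Lang("groovy"))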
+func (b *UpdateService) Script(script *Script) *UpdateService {
+ b.script = script
+ return b
+}
+
+// RetryOnConflict specifies how many times the operation should be retried
+// when a conflict occurs (default: 0).
+func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService {
+ b.retryOnConflict = &retryOnConflict
+ return b
+}
+
+// Fields is a list of fields to return in the response.
+func (b *UpdateService) Fields(fields ...string) *UpdateService {
+ b.fields = make([]string, 0, len(fields))
+ b.fields = append(b.fields, fields...)
+ return b
+}
+
+// Version defines the explicit version number for concurrency control.
+func (b *UpdateService) Version(version int64) *UpdateService {
+ b.version = &version
+ return b
+}
+
+// VersionType is one of "internal" or "force".
+func (b *UpdateService) VersionType(versionType string) *UpdateService {
+ b.versionType = versionType
+ return b
+}
+
+// Refresh the index after performing the update.
+func (b *UpdateService) Refresh(refresh bool) *UpdateService {
+ b.refresh = &refresh
+ return b
+}
+
+// ReplicationType is one of "sync" or "async".
+func (b *UpdateService) ReplicationType(replicationType string) *UpdateService {
+ b.replicationType = replicationType
+ return b
+}
+
+// ConsistencyLevel is one of "one", "quorum", or "all".
+// It sets the write consistency setting for the update operation.
+func (b *UpdateService) ConsistencyLevel(consistencyLevel string) *UpdateService {
+ b.consistencyLevel = consistencyLevel
+ return b
+}
+
+// Doc allows for updating a partial document.
+func (b *UpdateService) Doc(doc interface{}) *UpdateService {
+ b.doc = doc
+ return b
+}
+
+// Upsert can be used to index the document when it doesn't exist yet.
+// Use this e.g. to initialize a document with a default value.
+func (b *UpdateService) Upsert(doc interface{}) *UpdateService {
+ b.upsert = doc
+ return b
+}
+
+// DocAsUpsert can be used to insert the document if it doesn't already exist.
+func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService {
+ b.docAsUpsert = &docAsUpsert
+ return b
+}
+
+// DetectNoop will instruct Elasticsearch to check if changes will occur
+// when updating via Doc. If there aren't any changes, the request will
+// turn into a no-op.
+func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService {
+ b.detectNoop = &detectNoop
+ return b
+}
+
+// ScriptedUpsert should be set to true if the referenced script
+// (defined in Script or ScriptId) should be called to perform an insert.
+// The default is false.
+func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService {
+ b.scriptedUpsert = &scriptedUpsert
+ return b
+}
+
+// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms".
+func (b *UpdateService) Timeout(timeout string) *UpdateService {
+ b.timeout = timeout
+ return b
+}
+
+// Pretty instructs Elasticsearch to return human-readable, prettified JSON.
+func (b *UpdateService) Pretty(pretty bool) *UpdateService {
+ b.pretty = pretty
+ return b
+}
+
+// url returns the URL part of the document request.
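+//
+// For example, index "test", type "type1" and id "1" yield the path
+// /test/type1/1/_update, with options such as Refresh and Timeout encoded
+// as query parameters (refresh=true&timeout=1s); see
+// TestUpdateViaDocAndUpsert below.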
+func (b *UpdateService) url() (string, url.Values, error) { + // Build url + path := "/{index}/{type}/{id}/_update" + path, err := uritemplates.Expand(path, map[string]string{ + "index": b.index, + "type": b.typ, + "id": b.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Parameters + params := make(url.Values) + if b.pretty { + params.Set("pretty", "true") + } + if b.routing != "" { + params.Set("routing", b.routing) + } + if b.parent != "" { + params.Set("parent", b.parent) + } + if b.timeout != "" { + params.Set("timeout", b.timeout) + } + if b.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *b.refresh)) + } + if b.replicationType != "" { + params.Set("replication", b.replicationType) + } + if b.consistencyLevel != "" { + params.Set("consistency", b.consistencyLevel) + } + if len(b.fields) > 0 { + params.Set("fields", strings.Join(b.fields, ",")) + } + if b.version != nil { + params.Set("version", fmt.Sprintf("%d", *b.version)) + } + if b.versionType != "" { + params.Set("version_type", b.versionType) + } + if b.retryOnConflict != nil { + params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict)) + } + + return path, params, nil +} + +// body returns the body part of the document request. +func (b *UpdateService) body() (interface{}, error) { + source := make(map[string]interface{}) + + if b.script != nil { + src, err := b.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + + if b.scriptedUpsert != nil { + source["scripted_upsert"] = *b.scriptedUpsert + } + + if b.upsert != nil { + source["upsert"] = b.upsert + } + + if b.doc != nil { + source["doc"] = b.doc + } + if b.docAsUpsert != nil { + source["doc_as_upsert"] = *b.docAsUpsert + } + if b.detectNoop != nil { + source["detect_noop"] = *b.detectNoop + } + + return source, nil +} + +// Do executes the update operation. +func (b *UpdateService) Do() (*UpdateResponse, error) { + path, params, err := b.url() + if err != nil { + return nil, err + } + + // Get body of the request + body, err := b.body() + if err != nil { + return nil, err + } + + // Get response + res, err := b.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(UpdateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// UpdateResponse is the result of updating a document in Elasticsearch. +type UpdateResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` + GetResult *GetResult `json:"get"` +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/update_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/update_test.go new file mode 100644 index 000000000..57b26dc0e --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/update_test.go @@ -0,0 +1,312 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "net/url" + "testing" +) + +func TestUpdateViaScript(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). 
+ Script(NewScript("ctx._source.tags += tag").Params(map[string]interface{}{"tag": "blue"}).Lang("groovy")) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"inline":"ctx._source.tags += tag","lang":"groovy","params":{"tag":"blue"}}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptId(t *testing.T) { + client := setupTestClient(t) + + scriptParams := map[string]interface{}{ + "pageViewEvent": map[string]interface{}{ + "url": "foo.com/bar", + "response": 404, + "time": "2014-01-01 12:32", + }, + } + script := NewScriptId("my_web_session_summariser").Params(scriptParams) + + update := client.Update(). + Index("sessions").Type("session").Id("dh3sgudg8gsrgl"). + Script(script). + ScriptedUpsert(true). + Upsert(map[string]interface{}{}) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"id":"my_web_session_summariser","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptFile(t *testing.T) { + client := setupTestClient(t) + + scriptParams := map[string]interface{}{ + "pageViewEvent": map[string]interface{}{ + "url": "foo.com/bar", + "response": 404, + "time": "2014-01-01 12:32", + }, + } + script := NewScriptFile("update_script").Params(scriptParams) + + update := client.Update(). + Index("sessions").Type("session").Id("dh3sgudg8gsrgl"). + Script(script). + ScriptedUpsert(true). 
+ Upsert(map[string]interface{}{}) + + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"file":"update_script","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptAndUpsert(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Script(NewScript("ctx._source.counter += count").Params(map[string]interface{}{"count": 4})). + Upsert(map[string]interface{}{"counter": 1}) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"inline":"ctx._source.counter += count","params":{"count":4}},"upsert":{"counter":1}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaDoc(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Doc(map[string]interface{}{"name": "new_name"}). + DetectNoop(true) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"detect_noop":true,"doc":{"name":"new_name"}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaDocAndUpsert(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Doc(map[string]interface{}{"name": "new_name"}). + DocAsUpsert(true). + Timeout("1s"). 
+ Refresh(true)
+ path, params, err := update.url()
+ if err != nil {
+ t.Fatalf("expected to return URL, got: %v", err)
+ }
+ expectedPath := `/test/type1/1/_update`
+ if expectedPath != path {
+ t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
+ }
+ expectedParams := url.Values{"refresh": []string{"true"}, "timeout": []string{"1s"}}
+ if expectedParams.Encode() != params.Encode() {
+ t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
+ }
+ body, err := update.body()
+ if err != nil {
+ t.Fatalf("expected to return body, got: %v", err)
+ }
+ data, err := json.Marshal(body)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"doc":{"name":"new_name"},"doc_as_upsert":true}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestUpdateViaScriptIntegration(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion >= "1.4.3" || (esversion < "1.4.0" && esversion >= "1.3.8") {
+ t.Skip("groovy scripting has been disabled for [1.3.8,1.4.0) and 1.4.3+")
+ return
+ }
+
+ tweet1 := tweet{User: "olivere", Retweets: 10, Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("tweet").
+ Id("1").
+ BodyJson(&tweet1).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+
+ // Update number of retweets
+ increment := 1
+ script := NewScript("ctx._source.retweets += num").
+ Params(map[string]interface{}{"num": increment}).
+ Lang("groovy") // Use "groovy" as default language as 1.3 uses MVEL by default
+ update, err := client.Update().Index(testIndexName).Type("tweet").Id("1").
+ Script(script).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if update == nil {
+ t.Errorf("expected update to be != nil; got %v", update)
+ }
+ if update.Version != indexResult.Version+1 {
+ t.Errorf("expected version to be %d; got %d", indexResult.Version+1, update.Version)
+ }
+
+ // Get document
+ getResult, err := client.Get().
+ Index(testIndexName).
+ Type("tweet").
+ Id("1").
+ Do() + if err != nil { + t.Fatal(err) + } + if getResult.Index != testIndexName { + t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index) + } + if getResult.Type != "tweet" { + t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type) + } + if getResult.Id != "1" { + t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id) + } + if getResult.Source == nil { + t.Errorf("expected GetResult.Source to be != nil; got nil") + } + + // Decode the Source field + var tweetGot tweet + err = json.Unmarshal(*getResult.Source, &tweetGot) + if err != nil { + t.Fatal(err) + } + if tweetGot.Retweets != tweet1.Retweets+increment { + t.Errorf("expected Tweet.Retweets to be %d; got %d", tweet1.Retweets+increment, tweetGot.Retweets) + } +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/LICENSE b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/LICENSE new file mode 100644 index 000000000..de9c88cb6 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/uritemplates.go b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/uritemplates.go new file mode 100644 index 000000000..8a84813fe --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/uritemplates.go @@ -0,0 +1,359 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). 
+// +// To use uritemplates, parse a template string and expand it with a value +// map: +// +// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") +// values := make(map[string]interface{}) +// values["user"] = "jtacoma" +// values["repo"] = "uritemplates" +// expanded, _ := template.ExpandString(values) +// fmt.Printf(expanded) +// +package uritemplates + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) (escaped string) { + if allowReserved { + escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return escaped +} + +// A UriTemplate is a parsed representation of a URI template. +type UriTemplate struct { + raw string + parts []templatePart +} + +// Parse parses a URI template string into a UriTemplate object. +func Parse(rawtemplate string) (template *UriTemplate, err error) { + template = new(UriTemplate) + template.raw = rawtemplate + split := strings.Split(rawtemplate, "{") + template.parts = make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + err = errors.New("unexpected }") + break + } + template.parts[i].raw = s + } else { + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + err = errors.New("malformed template") + break + } + expression := subsplit[0] + template.parts[i*2-1], err = parseExpression(expression) + if err != nil { + break + } + template.parts[i*2].raw = subsplit[1] + } + } + if err != nil { + template = nil + } + return template, err +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" 
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '&':
+ result.first = "&"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '#':
+ result.first = "#"
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ default:
+ result.sep = ","
+ }
+ rawterms := strings.Split(expression, ",")
+ result.terms = make([]templateTerm, len(rawterms))
+ for i, raw := range rawterms {
+ result.terms[i], err = parseTerm(raw)
+ if err != nil {
+ break
+ }
+ }
+ return result, err
+}
+
+func parseTerm(term string) (result templateTerm, err error) {
+ if strings.HasSuffix(term, "*") {
+ result.explode = true
+ term = term[:len(term)-1]
+ }
+ split := strings.Split(term, ":")
+ if len(split) == 1 {
+ result.name = term
+ } else if len(split) == 2 {
+ result.name = split[0]
+ var parsed int64
+ parsed, err = strconv.ParseInt(split[1], 10, 0)
+ result.truncate = int(parsed)
+ } else {
+ err = errors.New("multiple colons in same term")
+ }
+ if !validname.MatchString(result.name) {
+ err = errors.New("not a valid name: " + result.name)
+ }
+ if result.explode && result.truncate > 0 {
+ err = errors.New("both explode and prefix modifiers on same term")
+ }
+ return result, err
+}
+
+// Expand expands a URI template with a set of values to produce a string.
+func (self *UriTemplate) Expand(value interface{}) (string, error) {
+ values, ismap := value.(map[string]interface{})
+ if !ismap {
+ if m, ismap := struct2map(value); !ismap {
+ return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
+ } else {
+ return self.Expand(m)
+ }
+ }
+ var buf bytes.Buffer
+ for _, p := range self.parts {
+ err := p.expand(&buf, values)
+ if err != nil {
+ return "", err
+ }
+ }
+ return buf.String(), nil
+}
+
+func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
+ if len(self.raw) > 0 {
+ buf.WriteString(self.raw)
+ return nil
+ }
+ var zeroLen = buf.Len()
+ buf.WriteString(self.first)
+ var firstLen = buf.Len()
+ for _, term := range self.terms {
+ value, exists := values[term.name]
+ if !exists {
+ continue
+ }
+ if buf.Len() != firstLen {
+ buf.WriteString(self.sep)
+ }
+ switch v := value.(type) {
+ case string:
+ self.expandString(buf, term, v)
+ case []interface{}:
+ self.expandArray(buf, term, v)
+ case map[string]interface{}:
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, v)
+ default:
+ if m, ismap := struct2map(value); ismap {
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, m)
+ } else {
+ str := fmt.Sprintf("%v", value)
+ self.expandString(buf, term, str)
+ }
+ }
+ }
+ if buf.Len() == firstLen {
+ original := buf.Bytes()[:zeroLen]
+ buf.Reset()
+ buf.Write(original)
+ }
+ return nil
+}
+
+func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
+ if self.named {
+ buf.WriteString(name)
+ if empty {
+ buf.WriteString(self.ifemp)
+ } else {
+ buf.WriteString("=")
+ }
+ }
+}
+
+func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ self.expandName(buf, t.name, len(s) == 0)
+ buf.WriteString(escape(s, self.allowReserved))
+}
+
+func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
+ if len(a) == 0 {
+ return
+ } else if !t.explode {
+ 
self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils.go b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils.go new file mode 100644 index 000000000..399ef4623 --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils.go @@ -0,0 +1,13 @@ +package uritemplates + +func Expand(path string, expansions map[string]string) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + values := make(map[string]interface{}) + for k, v := range expansions { + values[k] = v + } + return template.Expand(values) +} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils_test.go new file mode 100644 index 000000000..633949b6f --- /dev/null +++ b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils_test.go @@ -0,0 +1,105 @@ +package uritemplates + +import ( + "testing" +) + +type ExpandTest struct { + in string + expansions map[string]string + want string +} + +var expandTests = []ExpandTest{ + // #0: no expansions + { + "http://www.golang.org/", + map[string]string{}, + "http://www.golang.org/", + }, + // #1: one expansion, no escaping + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red", + }, + "http://www.golang.org/red/delete", + }, + // #2: one expansion, with hex escapes + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red/blue", + }, + "http://www.golang.org/red%2Fblue/delete", + }, + // #3: one expansion, with space + { + 
"http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org/red%20or%20blue/delete", + }, + // #4: expansion not found + { + "http://www.golang.org/{object}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org//delete", + }, + // #5: multiple expansions + { + "http://www.golang.org/{one}/{two}/{three}/get", + map[string]string{ + "one": "ONE", + "two": "TWO", + "three": "THREE", + }, + "http://www.golang.org/ONE/TWO/THREE/get", + }, + // #6: utf-8 characters + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": "£100", + }, + "http://www.golang.org/%C2%A3100/get", + }, + // #7: punctuations + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": `/\@:,.*~`, + }, + "http://www.golang.org/%2F%5C%40%3A%2C.%2A~/get", + }, + // #8: mis-matched brackets + { + "http://www.golang.org/{bucket/get", + map[string]string{ + "bucket": "red", + }, + "", + }, + // #9: "+" prefix for suppressing escape + // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3 + { + "http://www.golang.org/{+topic}", + map[string]string{ + "topic": "/topics/myproject/mytopic", + }, + // The double slashes here look weird, but it's intentional + "http://www.golang.org//topics/myproject/mytopic", + }, +} + +func TestExpand(t *testing.T) { + for i, test := range expandTests { + got, _ := Expand(test.in, test.expansions) + if got != test.want { + t.Errorf("got %q expected %q in test %d", got, test.want, i) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CHANGELOG-3.0.md b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CHANGELOG-3.0.md new file mode 100644 index 000000000..07f3e66bf --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CHANGELOG-3.0.md @@ -0,0 +1,363 @@ +# Elastic 3.0 + +Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes. + +We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft. + +So, to summarize: + +1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained. +2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch. + +The rest of the document is a list of all changes in Elastic 3.0. + +## Pointer types + +All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example: + +Example for Elastic 2.0 (old): + +```go +q := elastic.NewMatchAllQuery() +res, err := elastic.Search("one").Query(&q).Do() // notice the & here +``` + +Example for Elastic 3.0 (new): + +```go +q := elastic.NewMatchAllQuery() +res, err := elastic.Search("one").Query(q).Do() // no more & +// ... which can be simplified as: +res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do() +``` + +It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046). 
+
+## Query/filter merge
+
+One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`).
+
+The practical aspect of the merge is that you can now use queries where you once had to use filters. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay!
+
+Notice that some methods still go by "filter", e.g. `PostFilter`. However, they accept a `Query` now when they used to accept a `Filter` before.
+
+Example for Elastic 2.0 (old):
+
+```go
+q := elastic.NewMatchAllQuery()
+f := elastic.NewTermFilter("tag", "important")
+res, err := elastic.Search().Index("one").Query(&q).PostFilter(f)
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewMatchAllQuery()
+f := elastic.NewTermQuery("tag", "important") // it's a query now!
+res, err := elastic.Search().Index("one").Query(q).PostFilter(f)
+```
+
+## Facets are removed
+
+[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now.
+
+## Errors
+
+Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible to the consumer.
+
+Errors and all their details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59).
+
+### HTTP Status 404 (Not Found)
+
+When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0.
+
+Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to check, e.g., for the existence of indices or documents. All other responses will return an error.
+
+To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below).
+
+The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0.
+
+Example for Elastic 2.0 (old):
+
+```go
+res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
+if err != nil {
+ // Something else went wrong (but 404 is NOT an error in Elastic 2.0)
+}
+if !res.Found {
+ // Document has not been found
+}
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
+if err != nil {
+ if elastic.IsNotFound(err) {
+ // Document has not been found
+ } else {
+ // Something else went wrong
+ }
+}
+```
+
+### HTTP Status 408 (Timeouts)
+
+Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API.
+
+To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper.
+
+Example for Elastic 2.0 (old):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if err != nil {
+ // ...
+}
+if health.TimedOut {
+ // We have a timeout
+}
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if elastic.IsTimeout(err) {
+ // We have a timeout
+}
+```
+
+### Bulk Errors
+
+The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
+In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
+These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
+
+### Removed specific Elastic errors
+
+The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
+
+## Numeric types
+
+Elastic 3.0 has settled on `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
+
+## Pluralization
+
+Some services accept zero, one, or more indices or types to operate on.
+E.g., the `SearchService` accepts a list of zero, one, or more indices to
+search and therefore had a func called `Index(index string)` and a func
+called `Indices(indices ...string)`.
+
+Elastic 3.0 now only uses the singular form that, when applicable, accepts a
+variadic type. E.g. in the case of the `SearchService`, you now only have
+one func with the following signature: `Index(indices ...string)`.
+
+Notice this is limited to `Index(...)` and `Type(...)`. There are other
+services with variadic functions. These have not been changed.
+
+## Multiple calls to variadic functions
+
+Some services with variadic functions have cleared the underlying slice when
+called while other services just add to the existing slice. This has now been
+normalized to always add to the underlying slice.
+
+Example for Elastic 2.0 (old):
+
+```go
+// Would only clear scroll id "two"
+// because ScrollId cleared the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+// Now (correctly) clears both scroll id "one" and "two"
+// because ScrollId no longer clears the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+## Ping service requires URL
+
+The `Ping` service raised some issues because it is different from all
+other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`.
+
+Users expected to ping the cluster, but that is not possible, as the cluster
+can be a set of many nodes: so which node do we ping, then?
+
+To make this clearer, the `Ping` function on the client now requires users
+to explicitly set the URL of the node to ping.
+
+## Meta fields
+
+Many of the meta fields, e.g.
`_parent` or `_routing`, are now
+[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
+and are no longer returned as parts of the `fields` object. We had to change
+larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
+
+Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
+
+## HasParentQuery / HasChildQuery
+
+`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. This is now in line with the Java API.
+
+Example for Elastic 2.0 (old):
+
+```go
+allQ := elastic.NewMatchAllQuery()
+q := elastic.NewHasChildFilter("tweet").Query(&allQ)
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
+```
+
+## SetBasicAuth client option
+
+You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
+
+Example:
+
+```go
+client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+## Delete-by-Query API
+
+The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer a core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService`, you will most probably get a 404.
+
+An older version of this document stated the following:
+
+> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
+>
+> Example for Elastic 3.0 (new):
+>
+> ```go
+> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do()
+> if err == elastic.ErrPluginNotFound {
+> // Delete By Query API is not available
+> }
+> ```
+
+I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really just be opaque and return the response of Elasticsearch.
+
+If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play.
+
+## HasPlugin and SetRequiredPlugins
+
+Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client.
+
+Example for Elastic 3.0 (new):
+
+```go
+found, err := client.HasPlugin("delete-by-query")
+if err == nil && found {
+ // ... Delete By Query API is available
+}
+```
+
+To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client won't be created in the first place.
+
+```go
+// Will raise an error if the "delete-by-query" plugin is NOT installed
+client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query"))
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file.
+
+## Common Query has been renamed to Common Terms Query
+
+The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring).
+
+## Remove `MoreLikeThis` and `MoreLikeThisField`
+
+The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`.
+
+## Remove Filtered Query
+
+With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite much of your application code, it might be a good chance to get rid of things that are deprecated as well. So you might simply replace your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated).
+
+## Remove FuzzyLikeThis and FuzzyLikeThisField
+
+Both have been removed from Elasticsearch 2.0 as well.
+
+## Remove LimitFilter
+
+The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
+
+## Remove `_cache` and `_cache_key` from filters
+
+Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
+
+## Partial fields are gone
+
+Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
+
+## Scripting
+
+A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g.
aggregations) where you could just add the script as a string, specify the scripting language, add parameters, etc. With Elastic 3.0, you should now always use the `Script` type.
+
+Example for Elastic 2.0 (old):
+
+```go
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+ Script("ctx._source.retweets += num").
+ ScriptParams(map[string]interface{}{"num": 1}).
+ Upsert(map[string]interface{}{"retweets": 0}).
+ Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+ Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
+ Upsert(map[string]interface{}{"retweets": 0}).
+ Do()
+```
+
+## Cluster State
+
+The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`.
+
+## Unexported structs in response
+
+Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0, however, we changed that: (most) sub-structs are now unexported, meaning you can only pass around the whole response, not sub-structures of it. This makes it easier to restructure responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
+
+## Add offset to Histogram aggregation
+
+Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
+
+## Services
+
+### REST API specification
+
+As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
+
+Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
+
+This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
+
+At the same time, the file names of the services are renamed to match the REST API specification naming.
+
+### REST API Test Suite
+
+The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
+
+This process is not completed, though.
+
+
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CONTRIBUTING.md b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CONTRIBUTING.md
new file mode 100644
index 000000000..4fbc79dd0
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CONTRIBUTING.md
@@ -0,0 +1,40 @@
+# How to contribute
+
+Elastic is an open-source project and we are looking forward to each
+contribution.
+
+Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
+overview of the features of Elasticsearch. However, Elastic tries to resemble
+the Java API of Elasticsearch, which you can find [on GitHub](https://github.com/elastic/elasticsearch).
+
+This explains why you might think that some options are strange or missing
+in Elastic, while often they're just different.
Please check the Java API first.
+
+Having said that: Elasticsearch is moving fast, and it is very likely
+that we missed some features or changes. Feel free to change that.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* You compared the existing implementation with the Java API, didn't you?
+* Please work on the latest possible state of `olivere/elastic`.
+  Use `release-branch.v2` for targeting Elasticsearch 1.x and
+  `release-branch.v3` for targeting 2.x.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+  Elasticsearch. We currently support Elasticsearch 1.7.x in the
+  release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+  probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+
+## Additional Resources
+
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CONTRIBUTORS b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CONTRIBUTORS
new file mode 100644
index 000000000..0743d2d15
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/CONTRIBUTORS
@@ -0,0 +1,35 @@
+# This is a list of people who have contributed code
+# to the Elastic repository.
+#
+# It is just my small "thank you" to all those that helped
+# make Elastic what it is.
+#
+# Please keep this list sorted.
+
+Adam Alix [@adamalix](https://github.com/adamalix)
+Adam Weiner [@adamweiner](https://github.com/adamweiner)
+Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
+Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
+Christophe Courtaut [@kri5](https://github.com/kri5)
+Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
+Corey Scott [@corsc](https://github.com/corsc)
+Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
+Gerhard Häring [@ghaering](https://github.com/ghaering)
+Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
+Guillaume J.
Charmes [@creack](https://github.com/creack) +Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy) +Isaac Saldana [@isaldana](https://github.com/isaldana) +Jack Lindamood [@cep21](https://github.com/cep21) +John Goodall [@jgoodall](https://github.com/jgoodall) +Junpei Tsuji [@jun06t](https://github.com/jun06t) +Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh) +Mara Kim [@autochthe](https://github.com/autochthe) +Medhi Bechina [@mdzor](https://github.com/mdzor) +Nicholas Wolff [@nwolff](https://github.com/nwolff) +Orne Brocaar [@brocaar](https://github.com/brocaar) +Sacheendra talluri [@sacheendra](https://github.com/sacheendra) +Sean DuBois [@Sean-Der](https://github.com/Sean-Der) +Shalin LK [@shalinlk](https://github.com/shalinlk) +Sundar [@sundarv85](https://github.com/sundarv85) +Tetsuya Morimoto [@t2y](https://github.com/t2y) +zakthomas [@zakthomas](https://github.com/zakthomas) diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/LICENSE b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/LICENSE new file mode 100644 index 000000000..8b22cdb60 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) +Copyright © 2012-2015 Oliver Eilhard + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the “Software”), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/README.md b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/README.md new file mode 100644 index 000000000..eefd530df --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/README.md @@ -0,0 +1,415 @@ +# Elastic + +Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the +[Go](http://www.golang.org/) programming language. + +[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v3)](https://travis-ci.org/olivere/elastic) +[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v3) +[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE) + +See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic. + + +## Releases + +**The release branches (e.g. [`release-branch.v3`](https://github.com/olivere/elastic/tree/release-branch.v3)) are actively being worked on and can break at any time. 
If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
+
+Here's the version matrix:
+
+Elasticsearch version | Elastic version | Package URL
+----------------------|------------------|------------
+2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
+1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
+0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
+
+**Example:**
+
+You have installed Elasticsearch 2.1.1 and want to use Elastic. As listed above, you should use Elastic 3.0. So you first install the stable release of Elastic 3.0 from gopkg.in.
+
+```sh
+$ go get gopkg.in/olivere/elastic.v3
+```
+
+You then import it with this import path:
+
+```go
+import "gopkg.in/olivere/elastic.v3"
+```
+
+### Elastic 3.0
+
+Elastic 3.0 targets Elasticsearch 2.0 and later. Elasticsearch 2.0.0 was [released on 28th October 2015](https://www.elastic.co/blog/elasticsearch-2-0-0-released).
+
+Notice that there are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html) and we used this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md).
+
+### Elastic 2.0
+
+Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
+
+### Elastic 1.0
+
+Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
+to a recent version.
+
+However, if you cannot update for some reason, don't worry. Version 1.0 is
+still available. All you need to do is go-get it and change your import path
+as described above.
+
+
+## Status
+
+We have been using Elastic in production since 2012. Elastic is stable, but the API changes
+now and then. We strive for API compatibility.
+However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
+and we sometimes have to adapt.
+
+Having said that, there have been no big API changes that required you
+to rewrite your application big time. More often than not it's renaming APIs
+and adding/removing features so that Elastic is in sync with Elasticsearch.
+
+Elastic has been used in production with the following Elasticsearch versions:
+0.90, 1.0-1.7. Furthermore, we use [Travis CI](https://travis-ci.org/)
+to test Elastic with the most recent versions of Elasticsearch and Go.
+See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
+file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
+for the results.
+
+Elasticsearch has quite a few features. Most of them are implemented
+by Elastic. I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful.
+
+
+## Getting Started
+
+The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go).
The client connects to Elasticsearch on `http://127.0.0.1:9200` by default. + +You typically create one client for your app. Here's a complete example of +creating a client, creating an index, adding a document, executing a search etc. + +```go +// Create a client +client, err := elastic.NewClient() +if err != nil { + // Handle error +} + +// Create an index +_, err = client.CreateIndex("twitter").Do() +if err != nil { + // Handle error + panic(err) +} + +// Add a document to the index +tweet := Tweet{User: "olivere", Message: "Take Five"} +_, err = client.Index(). + Index("twitter"). + Type("tweet"). + Id("1"). + BodyJson(tweet). + Do() +if err != nil { + // Handle error + panic(err) +} + +// Search with a term query +termQuery := elastic.NewTermQuery("user", "olivere") +searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute +if err != nil { + // Handle error + panic(err) +} + +// searchResult is of type SearchResult and returns hits, suggestions, +// and all kinds of other information from Elasticsearch. +fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + +// Each is a convenience function that iterates over hits in a search result. +// It makes sure you don't need to check for nil values in the response. +// However, it ignores errors in serialization. If you want full control +// over iterating the hits, see below. +var ttyp Tweet +for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { + if t, ok := item.(Tweet); ok { + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } +} +// TotalHits is another convenience function that works even when something goes wrong. +fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) + +// Here's how you iterate through results with full control over each step. +if searchResult.Hits != nil { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } +} else { + // No hits + fmt.Print("Found no tweets\n") +} + +// Delete the index again +_, err = client.DeleteIndex("twitter").Do() +if err != nil { + // Handle error + panic(err) +} +``` + +See the [wiki](https://github.com/olivere/elastic/wiki) for more details. 
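+
+The Update API follows the same builder pattern. Here is a minimal sketch of a
+partial document update against the "twitter" index from the example above
+(the document id and the changed field are placeholders):
+
+```go
+// Partially update the document. DetectNoop skips the write when the
+// merged document would be unchanged.
+update, err := client.Update().
+ Index("twitter").
+ Type("tweet").
+ Id("1").
+ Doc(map[string]interface{}{"message": "Take Six"}).
+ DetectNoop(true).
+ Do()
+if err != nil {
+ // Handle error
+ panic(err)
+}
+fmt.Printf("Tweet %s is now at version %d\n", update.Id, update.Version)
+```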
+ + +## API Status + +### Document APIs + +- [x] Index API +- [x] Get API +- [x] Delete API +- [x] Update API +- [x] Multi Get API +- [x] Bulk API +- [x] Delete By Query API +- [x] Term Vectors +- [ ] Multi termvectors API + +### Search APIs + +- [x] Search +- [x] Search Template +- [ ] Search Shards API +- [x] Suggesters + - [x] Term Suggester + - [x] Phrase Suggester + - [x] Completion Suggester + - [x] Context Suggester +- [x] Multi Search API +- [x] Count API +- [ ] Search Exists API +- [ ] Validate API +- [x] Explain API +- [x] Percolator API +- [ ] Field Stats API + +### Aggregations + +- Metrics Aggregations + - [x] Avg + - [x] Cardinality + - [x] Extended Stats + - [x] Geo Bounds + - [x] Max + - [x] Min + - [x] Percentiles + - [x] Percentile Ranks + - [ ] Scripted Metric + - [x] Stats + - [x] Sum + - [x] Top Hits + - [x] Value Count +- Bucket Aggregations + - [x] Children + - [x] Date Histogram + - [x] Date Range + - [x] Filter + - [x] Filters + - [x] Geo Distance + - [ ] GeoHash Grid + - [x] Global + - [x] Histogram + - [x] IPv4 Range + - [x] Missing + - [x] Nested + - [x] Range + - [x] Reverse Nested + - [x] Sampler + - [x] Significant Terms + - [x] Terms +- Pipeline Aggregations + - [x] Avg Bucket + - [x] Derivative + - [x] Max Bucket + - [x] Min Bucket + - [x] Sum Bucket + - [x] Moving Average + - [x] Cumulative Sum + - [x] Bucket Script + - [x] Bucket Selector + - [x] Serial Differencing +- [x] Aggregation Metadata + +### Indices APIs + +- [x] Create Index +- [x] Delete Index +- [x] Get Index +- [x] Indices Exists +- [x] Open / Close Index +- [x] Put Mapping +- [x] Get Mapping +- [ ] Get Field Mapping +- [ ] Types Exists +- [x] Index Aliases +- [x] Update Indices Settings +- [x] Get Settings +- [ ] Analyze +- [x] Index Templates +- [x] Warmers +- [x] Indices Stats +- [ ] Indices Segments +- [ ] Indices Recovery +- [ ] Clear Cache +- [x] Flush +- [x] Refresh +- [x] Optimize +- [ ] Shadow Replica Indices +- [ ] Upgrade + +### cat APIs + +The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line. 
+
+- [ ] cat aliases
+- [ ] cat allocation
+- [ ] cat count
+- [ ] cat fielddata
+- [ ] cat health
+- [ ] cat indices
+- [ ] cat master
+- [ ] cat nodes
+- [ ] cat pending tasks
+- [ ] cat plugins
+- [ ] cat recovery
+- [ ] cat thread pool
+- [ ] cat shards
+- [ ] cat segments
+
+### Cluster APIs
+
+- [x] Cluster Health
+- [x] Cluster State
+- [x] Cluster Stats
+- [ ] Pending Cluster Tasks
+- [ ] Cluster Reroute
+- [ ] Cluster Update Settings
+- [ ] Nodes Stats
+- [x] Nodes Info
+- [ ] Nodes hot_threads
+
+### Query DSL
+
+- [x] Match All Query
+- [x] Inner hits
+- Full text queries
+  - [x] Match Query
+  - [x] Multi Match Query
+  - [x] Common Terms Query
+  - [x] Query String Query
+  - [x] Simple Query String Query
+- Term level queries
+  - [x] Term Query
+  - [x] Terms Query
+  - [x] Range Query
+  - [x] Exists Query
+  - [x] Missing Query
+  - [x] Prefix Query
+  - [x] Wildcard Query
+  - [x] Regexp Query
+  - [x] Fuzzy Query
+  - [x] Type Query
+  - [x] Ids Query
+- Compound queries
+  - [x] Constant Score Query
+  - [x] Bool Query
+  - [x] Dis Max Query
+  - [x] Function Score Query
+  - [x] Boosting Query
+  - [x] Indices Query
+  - [x] And Query (deprecated)
+  - [x] Not Query
+  - [x] Or Query (deprecated)
+  - [ ] Filtered Query (deprecated)
+  - [ ] Limit Query (deprecated)
+- Joining queries
+  - [x] Nested Query
+  - [x] Has Child Query
+  - [x] Has Parent Query
+- Geo queries
+  - [ ] GeoShape Query
+  - [x] Geo Bounding Box Query
+  - [x] Geo Distance Query
+  - [ ] Geo Distance Range Query
+  - [x] Geo Polygon Query
+  - [ ] Geohash Cell Query
+- Specialized queries
+  - [x] More Like This Query
+  - [x] Template Query
+  - [x] Script Query
+- Span queries
+  - [ ] Span Term Query
+  - [ ] Span Multi Term Query
+  - [ ] Span First Query
+  - [ ] Span Near Query
+  - [ ] Span Or Query
+  - [ ] Span Not Query
+  - [ ] Span Containing Query
+  - [ ] Span Within Query
+
+### Modules
+
+- [ ] Snapshot and Restore
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+
+### Scan
+
+Scrolling through documents (e.g. `search_type=scan`) is implemented via
+the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
+
+
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot to the great folks working hard on
+[Elasticsearch](http://www.elasticsearch.org/)
+and
+[Go](http://www.golang.org/).
+
+Elastic uses portions of the
+[uritemplates](https://github.com/jtacoma/uritemplates) library
+by Joshua Tacoma and
+[backoff](https://github.com/cenkalti/backoff) by Cenk Altı.
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/LICENSE b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/LICENSE
new file mode 100644
index 000000000..f6f2dcc97
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/LICENSE
@@ -0,0 +1,22 @@
+Portions of this code rely on this LICENSE:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/backoff.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/backoff.go
new file mode 100644
index 000000000..f6d7ad9a0
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/backoff.go
@@ -0,0 +1,159 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package backoff
+
+import (
+ "math"
+ "math/rand"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Backoff is an interface for different types of backoff algorithms.
+type Backoff interface {
+ Next() time.Duration
+ Reset()
+}
+
+// Stop is used as a signal to indicate that no more retries should be made.
+const Stop time.Duration = -1
+
+// -- Simple Backoff --
+
+// SimpleBackoff takes a list of fixed values for backoff intervals.
+// Each call to Next returns the next value from that fixed list.
+// After each value is returned, subsequent calls to Next will only return
+// the last element. The caller may specify if the values are "jittered".
+type SimpleBackoff struct {
+ sync.Mutex
+ ticks []int
+ index int
+ jitter bool
+ stop bool
+}
+
+// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
+// list of fixed intervals in milliseconds.
+func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
+ return &SimpleBackoff{
+ ticks: ticks,
+ index: 0,
+ jitter: false,
+ stop: false,
+ }
+}
+
+// Jitter, when set, randomizes the returned value to [0.5*value .. 1.5*value].
+func (b *SimpleBackoff) Jitter(doJitter bool) *SimpleBackoff {
+ b.Lock()
+ defer b.Unlock()
+ b.jitter = doJitter
+ return b
+}
+
+// SendStop, when enabled, makes Next return Stop once
+// the list of values is exhausted.
+func (b *SimpleBackoff) SendStop(doStop bool) *SimpleBackoff {
+ b.Lock()
+ defer b.Unlock()
+ b.stop = doStop
+ return b
+}
+
+// Next returns the next wait interval.
+func (b *SimpleBackoff) Next() time.Duration {
+	b.Lock()
+	defer b.Unlock()
+
+	i := b.index
+	if i >= len(b.ticks) {
+		if b.stop {
+			return Stop
+		}
+		i = len(b.ticks) - 1
+		b.index = i
+	} else {
+		b.index++
+	}
+
+	ms := b.ticks[i]
+	if b.jitter {
+		ms = jitter(ms)
+	}
+	return time.Duration(ms) * time.Millisecond
+}
+
+// Reset resets SimpleBackoff.
+func (b *SimpleBackoff) Reset() {
+	b.Lock()
+	b.index = 0
+	b.Unlock()
+}
+
+// jitter randomizes the interval, returning a value in [0.5*millis .. 1.5*millis].
+func jitter(millis int) int {
+	if millis <= 0 {
+		return 0
+	}
+	return millis/2 + rand.Intn(millis)
+}
+
+// -- Exponential --
+
+// ExponentialBackoff implements the simple exponential backoff described by
+// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
+type ExponentialBackoff struct {
+	sync.Mutex
+	t    float64 // initial timeout (in msec)
+	f    float64 // exponential factor (e.g. 2)
+	m    float64 // maximum timeout (in msec)
+	n    int64   // number of retries
+	stop bool    // indicates whether Next should send "Stop" when the max timeout is reached
+}
+
+// NewExponentialBackoff returns an ExponentialBackoff backoff policy.
+// Use initialTimeout to set the first/minimal interval
+// and maxTimeout to set the maximum wait interval.
+func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
+	return &ExponentialBackoff{
+		t:    float64(int64(initialTimeout / time.Millisecond)),
+		f:    2.0,
+		m:    float64(int64(maxTimeout / time.Millisecond)),
+		n:    0,
+		stop: false,
+	}
+}
+
+// SendStop, when enabled, makes Next return Stop once
+// the maximum timeout is reached.
+func (b *ExponentialBackoff) SendStop(doStop bool) *ExponentialBackoff {
+	b.Lock()
+	defer b.Unlock()
+	b.stop = doStop
+	return b
+}
+
+// Next returns the next wait interval.
+func (t *ExponentialBackoff) Next() time.Duration {
+	t.Lock()
+	defer t.Unlock()
+
+	n := float64(atomic.AddInt64(&t.n, 1))
+	r := 1.0 + rand.Float64() // random number in [1..2)
+	m := math.Min(r*t.t*math.Pow(t.f, n), t.m)
+	if t.stop && m >= t.m {
+		return Stop
+	}
+	d := time.Duration(int64(m)) * time.Millisecond
+	return d
+}
+
+// Reset resets the backoff policy so that it can be reused.
+func (t *ExponentialBackoff) Reset() {
+	t.Lock()
+	t.n = 0
+	t.Unlock()
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/backoff_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/backoff_test.go
new file mode 100644
index 000000000..9b5bcf0e1
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/backoff_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+ +package backoff + +import ( + "math/rand" + "testing" + "time" +) + +func TestSimpleBackoff(t *testing.T) { + b := NewSimpleBackoff(1, 2, 7) + + if got, want := b.Next(), time.Duration(1)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(2)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + b.Reset() + + if got, want := b.Next(), time.Duration(1)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(2)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestSimpleBackoffWithStop(t *testing.T) { + b := NewSimpleBackoff(1, 2, 7).SendStop(true) + + // It should eventually return Stop (-1) after some loops. + var last time.Duration + for i := 0; i < 10; i++ { + last = b.Next() + if last == Stop { + break + } + } + if got, want := last, Stop; got != want { + t.Errorf("expected %v; got: %v", want, got) + } + + b.Reset() + + // It should eventually return Stop (-1) after some loops. + for i := 0; i < 10; i++ { + last = b.Next() + if last == Stop { + break + } + } + if got, want := last, Stop; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestExponentialBackoff(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + + min := time.Duration(8) * time.Millisecond + max := time.Duration(256) * time.Millisecond + b := NewExponentialBackoff(min, max) + + between := func(value time.Duration, a, b int) bool { + x := int(value / time.Millisecond) + return a <= x && x <= b + } + + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + + b.Reset() + + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } + if got := b.Next(); !between(got, 8, 256) { + t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) + } +} + +func TestExponentialBackoffWithStop(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + + min := time.Duration(8) * time.Millisecond + max := time.Duration(256) * time.Millisecond + b := NewExponentialBackoff(min, max).SendStop(true) + + // It should eventually return Stop (-1) after some loops. 
+	var last time.Duration
+	for i := 0; i < 10; i++ {
+		last = b.Next()
+		if last == Stop {
+			break
+		}
+	}
+	if got, want := last, Stop; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	b.Reset()
+
+	// It should eventually return Stop (-1) after some loops.
+	for i := 0; i < 10; i++ {
+		last = b.Next()
+		if last == Stop {
+			break
+		}
+	}
+	if got, want := last, Stop; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/retry.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/retry.go
new file mode 100644
index 000000000..701e03ccc
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/retry.go
@@ -0,0 +1,53 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is (c) 2014 Cenk Altı and governed by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package backoff
+
+import "time"
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy says to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry runs the operation o until it does not return an error or until
+// the backoff policy b stops. o is guaranteed to be run at least once.
+// It is the caller's responsibility to reset b after Retry returns.
+//
+// Retry sleeps the goroutine for the duration returned by the backoff
+// policy after a failed operation returns.
+func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }
+
+// RetryNotify calls the notify function with the error and wait duration
+// for each failed attempt before sleeping.
+func RetryNotify(operation Operation, b Backoff, notify Notify) error {
+	var err error
+	var next time.Duration
+
+	b.Reset()
+	for {
+		if err = operation(); err == nil {
+			return nil
+		}
+
+		if next = b.Next(); next == Stop {
+			return err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		time.Sleep(next)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/retry_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/retry_test.go
new file mode 100644
index 000000000..0dd45404b
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/backoff/retry_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is (c) 2014 Cenk Altı and governed by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package backoff
+
+import (
+	"errors"
+	"log"
+	"testing"
+	"time"
+)
+
+func TestRetry(t *testing.T) {
+	const successOn = 3
+	var i = 0
+
+	// This function succeeds on the "successOn"-th call.
+	f := func() error {
+		i++
+		log.Printf("function is called %d. 
time\n", i)
+
+		if i == successOn {
+			log.Println("OK")
+			return nil
+		}
+
+		log.Println("error")
+		return errors.New("error")
+	}
+
+	min := time.Duration(8) * time.Millisecond
+	max := time.Duration(256) * time.Millisecond
+	err := Retry(f, NewExponentialBackoff(min, max).SendStop(true))
+	if err != nil {
+		t.Errorf("unexpected error: %s", err.Error())
+	}
+	if i != successOn {
+		t.Errorf("invalid number of retries: %d", i)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk.go
new file mode 100644
index 000000000..91c7a9c17
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk.go
@@ -0,0 +1,314 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+type BulkService struct {
+	client *Client
+
+	index    string
+	_type    string
+	requests []BulkableRequest
+	//replicationType string
+	//consistencyLevel string
+	timeout string
+	refresh *bool
+	pretty  bool
+
+	sizeInBytes int64
+}
+
+func NewBulkService(client *Client) *BulkService {
+	builder := &BulkService{
+		client:   client,
+		requests: make([]BulkableRequest, 0),
+	}
+	return builder
+}
+
+func (s *BulkService) reset() {
+	s.requests = make([]BulkableRequest, 0)
+	s.sizeInBytes = 0
+}
+
+func (s *BulkService) Index(index string) *BulkService {
+	s.index = index
+	return s
+}
+
+func (s *BulkService) Type(_type string) *BulkService {
+	s._type = _type
+	return s
+}
+
+func (s *BulkService) Timeout(timeout string) *BulkService {
+	s.timeout = timeout
+	return s
+}
+
+func (s *BulkService) Refresh(refresh bool) *BulkService {
+	s.refresh = &refresh
+	return s
+}
+
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *BulkService) Add(r BulkableRequest) *BulkService {
+	s.requests = append(s.requests, r)
+	s.sizeInBytes += s.estimateSizeInBytes(r)
+	return s
+}
+
+func (s *BulkService) EstimatedSizeInBytes() int64 {
+	return s.sizeInBytes
+}
+
+func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
+	// +1 for the \n
+	return int64(1 + len([]byte(r.String())))
+}
+
+func (s *BulkService) NumberOfActions() int {
+	return len(s.requests)
+}
+
+func (s *BulkService) bodyAsString() (string, error) {
+	buf := bytes.NewBufferString("")
+
+	for _, req := range s.requests {
+		source, err := req.Source()
+		if err != nil {
+			return "", err
+		}
+		for _, line := range source {
+			_, err := buf.WriteString(fmt.Sprintf("%s\n", line))
+			if err != nil {
+				// Propagate the write error (this previously returned nil by mistake).
+				return "", err
+			}
+		}
+	}
+
+	return buf.String(), nil
+}
+
+func (s *BulkService) Do() (*BulkResponse, error) {
+	// No actions? 
+ if s.NumberOfActions() == 0 { + return nil, errors.New("elastic: No bulk actions to commit") + } + + // Get body + body, err := s.bodyAsString() + if err != nil { + return nil, err + } + + // Build url + path := "/" + if s.index != "" { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": s.index, + }) + if err != nil { + return nil, err + } + path += index + "/" + } + if s._type != "" { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": s._type, + }) + if err != nil { + return nil, err + } + path += typ + "/" + } + path += "_bulk" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(BulkResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + + // Reset so the request can be reused + s.reset() + + return ret, nil +} + +// BulkResponse is a response to a bulk execution. +// +// Example: +// { +// "took":3, +// "errors":false, +// "items":[{ +// "index":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":3, +// "status":201 +// } +// },{ +// "index":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":3, +// "status":200 +// } +// },{ +// "delete":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":4, +// "status":200, +// "found":true +// } +// },{ +// "update":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":4, +// "status":200 +// } +// }] +// } +type BulkResponse struct { + Took int `json:"took,omitempty"` + Errors bool `json:"errors,omitempty"` + Items []map[string]*BulkResponseItem `json:"items,omitempty"` +} + +// BulkResponseItem is the result of a single bulk request. +type BulkResponseItem struct { + Index string `json:"_index,omitempty"` + Type string `json:"_type,omitempty"` + Id string `json:"_id,omitempty"` + Version int `json:"_version,omitempty"` + Status int `json:"status,omitempty"` + Found bool `json:"found,omitempty"` + Error *ErrorDetails `json:"error,omitempty"` +} + +// Indexed returns all bulk request results of "index" actions. +func (r *BulkResponse) Indexed() []*BulkResponseItem { + return r.ByAction("index") +} + +// Created returns all bulk request results of "create" actions. +func (r *BulkResponse) Created() []*BulkResponseItem { + return r.ByAction("create") +} + +// Updated returns all bulk request results of "update" actions. +func (r *BulkResponse) Updated() []*BulkResponseItem { + return r.ByAction("update") +} + +// Deleted returns all bulk request results of "delete" actions. +func (r *BulkResponse) Deleted() []*BulkResponseItem { + return r.ByAction("delete") +} + +// ByAction returns all bulk request results of a certain action, +// e.g. "index" or "delete". +func (r *BulkResponse) ByAction(action string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + items := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + if result, found := item[action]; found { + items = append(items, result) + } + } + return items +} + +// ById returns all bulk request results of a given document id, +// regardless of the action ("index", "delete" etc.). 
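+// For example, with the sample response shown above, ById("1") would return
+// both the "index" item and the "delete" item recorded for document id "1".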
+func (r *BulkResponse) ById(id string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + items := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + for _, result := range item { + if result.Id == id { + items = append(items, result) + } + } + } + return items +} + +// Failed returns those items of a bulk response that have errors, +// i.e. those that don't have a status code between 200 and 299. +func (r *BulkResponse) Failed() []*BulkResponseItem { + if r.Items == nil { + return nil + } + errors := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + for _, result := range item { + if !(result.Status >= 200 && result.Status <= 299) { + errors = append(errors, result) + } + } + } + return errors +} + +// Succeeded returns those items of a bulk response that have no errors, +// i.e. those have a status code between 200 and 299. +func (r *BulkResponse) Succeeded() []*BulkResponseItem { + if r.Items == nil { + return nil + } + succeeded := make([]*BulkResponseItem, 0) + for _, item := range r.Items { + for _, result := range item { + if result.Status >= 200 && result.Status <= 299 { + succeeded = append(succeeded, result) + } + } + } + return succeeded +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_delete_request.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_delete_request.go new file mode 100644 index 000000000..0ea372209 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_delete_request.go @@ -0,0 +1,112 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// -- Bulk delete request -- + +// Bulk request to remove document from Elasticsearch. +type BulkDeleteRequest struct { + BulkableRequest + index string + typ string + id string + routing string + refresh *bool + version int64 // default is MATCH_ANY + versionType string // default is "internal" +} + +func NewBulkDeleteRequest() *BulkDeleteRequest { + return &BulkDeleteRequest{} +} + +func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest { + r.index = index + return r +} + +func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest { + r.typ = typ + return r +} + +func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest { + r.id = id + return r +} + +func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest { + r.routing = routing + return r +} + +func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest { + r.refresh = &refresh + return r +} + +func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest { + r.version = version + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". 
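+// For example (illustrative): Version(4) together with VersionType("external")
+// applies Elasticsearch's external-versioning rules to the delete.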
+func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest { + r.versionType = versionType + return r +} + +func (r *BulkDeleteRequest) String() string { + lines, err := r.Source() + if err == nil { + return strings.Join(lines, "\n") + } + return fmt.Sprintf("error: %v", err) +} + +func (r *BulkDeleteRequest) Source() ([]string, error) { + lines := make([]string, 1) + + source := make(map[string]interface{}) + deleteCommand := make(map[string]interface{}) + if r.index != "" { + deleteCommand["_index"] = r.index + } + if r.typ != "" { + deleteCommand["_type"] = r.typ + } + if r.id != "" { + deleteCommand["_id"] = r.id + } + if r.routing != "" { + deleteCommand["_routing"] = r.routing + } + if r.version > 0 { + deleteCommand["_version"] = r.version + } + if r.versionType != "" { + deleteCommand["_version_type"] = r.versionType + } + if r.refresh != nil { + deleteCommand["refresh"] = *r.refresh + } + source["delete"] = deleteCommand + + body, err := json.Marshal(source) + if err != nil { + return nil, err + } + + lines[0] = string(body) + + return lines, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_delete_request_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_delete_request_test.go new file mode 100644 index 000000000..73abfcd40 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_delete_request_test.go @@ -0,0 +1,42 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestBulkDeleteRequestSerialization(t *testing.T) { + tests := []struct { + Request BulkableRequest + Expected []string + }{ + // #0 + { + Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"), + Expected: []string{ + `{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`, + }, + }, + } + + for i, test := range tests { + lines, err := test.Request.Source() + if err != nil { + t.Fatalf("case #%d: expected no error, got: %v", i, err) + } + if lines == nil { + t.Fatalf("case #%d: expected lines, got nil", i) + } + if len(lines) != len(test.Expected) { + t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) + } + for j, line := range lines { + if line != test.Expected[j] { + t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line) + } + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_index_request.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_index_request.go new file mode 100644 index 000000000..495694671 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_index_request.go @@ -0,0 +1,173 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Bulk request to add document to Elasticsearch. 
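+// A typical use (sketch; index, type and id are illustrative):
+//
+//	NewBulkIndexRequest().Index("twitter").Type("tweet").Id("1").Doc(doc)
+//
+// The request is then handed to a BulkService via its Add method.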
+type BulkIndexRequest struct { + BulkableRequest + index string + typ string + id string + opType string + routing string + parent string + timestamp string + ttl int64 + refresh *bool + version int64 // default is MATCH_ANY + versionType string // default is "internal" + doc interface{} +} + +func NewBulkIndexRequest() *BulkIndexRequest { + return &BulkIndexRequest{ + opType: "index", + } +} + +func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest { + r.index = index + return r +} + +func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest { + r.typ = typ + return r +} + +func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest { + r.id = id + return r +} + +func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest { + r.opType = opType + return r +} + +func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest { + r.routing = routing + return r +} + +func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest { + r.parent = parent + return r +} + +func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest { + r.timestamp = timestamp + return r +} + +func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest { + r.ttl = ttl + return r +} + +func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest { + r.refresh = &refresh + return r +} + +func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest { + r.version = version + return r +} + +func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest { + r.versionType = versionType + return r +} + +func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest { + r.doc = doc + return r +} + +func (r *BulkIndexRequest) String() string { + lines, err := r.Source() + if err == nil { + return strings.Join(lines, "\n") + } + return fmt.Sprintf("error: %v", err) +} + +func (r *BulkIndexRequest) Source() ([]string, error) { + // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } + // { "field1" : "value1" } + + lines := make([]string, 2) + + // "index" ... + command := make(map[string]interface{}) + indexCommand := make(map[string]interface{}) + if r.index != "" { + indexCommand["_index"] = r.index + } + if r.typ != "" { + indexCommand["_type"] = r.typ + } + if r.id != "" { + indexCommand["_id"] = r.id + } + if r.routing != "" { + indexCommand["_routing"] = r.routing + } + if r.parent != "" { + indexCommand["_parent"] = r.parent + } + if r.timestamp != "" { + indexCommand["_timestamp"] = r.timestamp + } + if r.ttl > 0 { + indexCommand["_ttl"] = r.ttl + } + if r.version > 0 { + indexCommand["_version"] = r.version + } + if r.versionType != "" { + indexCommand["_version_type"] = r.versionType + } + if r.refresh != nil { + indexCommand["refresh"] = *r.refresh + } + command[r.opType] = indexCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // "field1" ... 
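+	// The document is serialized on the second line: raw JSON messages and
+	// plain strings are passed through as-is; anything else is JSON-marshaled.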
+ if r.doc != nil { + switch t := r.doc.(type) { + default: + body, err := json.Marshal(r.doc) + if err != nil { + return nil, err + } + lines[1] = string(body) + case json.RawMessage: + lines[1] = string(t) + case *json.RawMessage: + lines[1] = string(*t) + case string: + lines[1] = t + case *string: + lines[1] = *t + } + } else { + lines[1] = "{}" + } + + return lines, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_index_request_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_index_request_test.go new file mode 100644 index 000000000..271347e30 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_index_request_test.go @@ -0,0 +1,63 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + "time" +) + +func TestBulkIndexRequestSerialization(t *testing.T) { + tests := []struct { + Request BulkableRequest + Expected []string + }{ + // #0 + { + Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1"). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, + // #1 + { + Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1"). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, + // #2 + { + Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1"). + Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), + Expected: []string{ + `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, + }, + }, + } + + for i, test := range tests { + lines, err := test.Request.Source() + if err != nil { + t.Fatalf("case #%d: expected no error, got: %v", i, err) + } + if lines == nil { + t.Fatalf("case #%d: expected lines, got nil", i) + } + if len(lines) != len(test.Expected) { + t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) + } + for j, line := range lines { + if line != test.Expected[j] { + t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line) + } + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_processor.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_processor.go new file mode 100644 index 000000000..04492a47c --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_processor.go @@ -0,0 +1,515 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "sync" + "sync/atomic" + "time" + + "gopkg.in/olivere/elastic.v3/backoff" +) + +// BulkProcessorService allows to easily process bulk requests. It allows setting +// policies when to flush new bulk requests, e.g. 
based on a number of actions,
+// on the size of the actions, and/or to flush periodically. It also allows
+// you to control the number of concurrent bulk requests executed
+// in parallel.
+//
+// BulkProcessorService, by default, commits either every 1000 requests or when the
+// (estimated) size of the bulk requests exceeds 5 MB. However, it does not
+// commit periodically. BulkProcessorService also retries by default, using
+// an exponential backoff algorithm.
+//
+// The caller is responsible for setting the index and type on every
+// bulk request added to BulkProcessorService.
+//
+// BulkProcessorService takes ideas from the BulkProcessor of the
+// Elasticsearch Java API as documented in
+// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
+type BulkProcessorService struct {
+	c              *Client
+	beforeFn       BulkBeforeFunc
+	afterFn        BulkAfterFunc
+	name           string        // name of processor
+	numWorkers     int           // # of workers (>= 1)
+	bulkActions    int           // # of requests after which to commit
+	bulkSize       int           // # of bytes after which to commit
+	flushInterval  time.Duration // periodic flush interval
+	wantStats      bool          // indicates whether to gather statistics
+	initialTimeout time.Duration // initial wait time before retry on errors
+	maxTimeout     time.Duration // max time to wait for retry on errors
+}
+
+// NewBulkProcessorService creates a new BulkProcessorService.
+func NewBulkProcessorService(client *Client) *BulkProcessorService {
+	return &BulkProcessorService{
+		c:              client,
+		numWorkers:     1,
+		bulkActions:    1000,
+		bulkSize:       5 << 20, // 5 MB
+		initialTimeout: time.Duration(200) * time.Millisecond,
+		maxTimeout:     time.Duration(10000) * time.Millisecond,
+	}
+}
+
+// BulkBeforeFunc defines the signature of callbacks that are executed
+// before a commit to Elasticsearch.
+type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
+
+// BulkAfterFunc defines the signature of callbacks that are executed
+// after a commit to Elasticsearch. The err parameter signals an error.
+type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
+
+// Before specifies a function to be executed before bulk requests get committed
+// to Elasticsearch.
+func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
+	s.beforeFn = fn
+	return s
+}
+
+// After specifies a function to be executed when bulk requests have been
+// committed to Elasticsearch. The After callback executes both when the
+// commit was successful as well as on failures.
+func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
+	s.afterFn = fn
+	return s
+}
+
+// Name is an optional name to identify this bulk processor.
+func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
+	s.name = name
+	return s
+}
+
+// Workers sets the number of concurrent workers allowed to be
+// executed. Defaults to 1 and must be greater than or equal to 1.
+func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
+	s.numWorkers = num
+	return s
+}
+
+// BulkActions specifies when to flush based on the number of actions
+// currently added. Defaults to 1000 and can be set to -1 to be disabled.
+func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
+	s.bulkActions = bulkActions
+	return s
+}
+
+// BulkSize specifies when to flush based on the size (in bytes) of the actions
+// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
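+// For example (illustrative), BulkSize(10 << 20) commits once the queued
+// requests are estimated to exceed 10 MB.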
+func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService {
+	s.bulkSize = bulkSize
+	return s
+}
+
+// FlushInterval makes the processor flush at the end of each given interval.
+// This is disabled by default. If you want the bulk processor to
+// operate completely asynchronously, set both BulkActions and BulkSize to
+// -1 and set the FlushInterval to a meaningful interval.
+func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService {
+	s.flushInterval = interval
+	return s
+}
+
+// Stats tells the bulk processor to gather stats while running.
+// Use the Stats method on BulkProcessor to retrieve them.
+// This is disabled by default.
+func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService {
+	s.wantStats = wantStats
+	return s
+}
+
+// Do creates a new BulkProcessor and starts it.
+// Consider the BulkProcessor as a running instance that accepts bulk requests
+// and commits them to Elasticsearch, spreading the work across one or more
+// workers.
+//
+// You can interact with the BulkProcessor returned by Do, e.g. by starting
+// and stopping (or closing) it.
+//
+// Calling Do several times returns new BulkProcessors. You probably don't
+// want to do this. BulkProcessorService is just a builder.
+func (s *BulkProcessorService) Do() (*BulkProcessor, error) {
+	p := newBulkProcessor(
+		s.c,
+		s.beforeFn,
+		s.afterFn,
+		s.name,
+		s.numWorkers,
+		s.bulkActions,
+		s.bulkSize,
+		s.flushInterval,
+		s.wantStats,
+		s.initialTimeout,
+		s.maxTimeout)
+
+	err := p.Start()
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// -- Bulk Processor Statistics --
+
+// BulkProcessorStats contains various statistics of a bulk processor
+// while it is running. Use the Stats method to retrieve them while the
+// processor is running.
+type BulkProcessorStats struct {
+	Flushed   int64 // number of times the flush interval has been invoked
+	Committed int64 // # of times workers committed bulk requests
+	Indexed   int64 // # of requests indexed
+	Created   int64 // # of requests that ES reported as creates (201)
+	Updated   int64 // # of requests that ES reported as updates
+	Deleted   int64 // # of requests that ES reported as deletes
+	Succeeded int64 // # of requests that ES reported as successful
+	Failed    int64 // # of requests that ES reported as failed
+
+	Workers []*BulkProcessorWorkerStats // stats for each worker
+}
+
+// BulkProcessorWorkerStats represents per-worker statistics.
+type BulkProcessorWorkerStats struct {
+	Queued       int64         // # of requests queued in this worker
+	LastDuration time.Duration // duration of last commit
+}
+
+// newBulkProcessorStats initializes and returns a BulkProcessorStats struct.
+func newBulkProcessorStats(workers int) *BulkProcessorStats {
+	stats := &BulkProcessorStats{
+		Workers: make([]*BulkProcessorWorkerStats, workers),
+	}
+	for i := 0; i < workers; i++ {
+		stats.Workers[i] = &BulkProcessorWorkerStats{}
+	}
+	return stats
+}
+
+// -- Bulk Processor --
+
+// BulkProcessor encapsulates a task that accepts bulk requests and
+// orchestrates committing them to Elasticsearch via one or more workers.
+//
+// BulkProcessor is returned by setting up a BulkProcessorService and
+// calling the Do method.
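+//
+// A typical lifecycle (sketch; the processor name is illustrative):
+//
+//	p, err := client.BulkProcessor().Name("worker-1").Workers(2).Do()
+//	// ... p.Add(request) for each document ...
+//	err = p.Close() // commits outstanding requests and stops the workers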
+type BulkProcessor struct { + c *Client + beforeFn BulkBeforeFunc + afterFn BulkAfterFunc + name string + bulkActions int + bulkSize int + numWorkers int + executionId int64 + requestsC chan BulkableRequest + workerWg sync.WaitGroup + workers []*bulkWorker + flushInterval time.Duration + flusherStopC chan struct{} + wantStats bool + initialTimeout time.Duration // initial wait time before retry on errors + maxTimeout time.Duration // max time to wait for retry on errors + + startedMu sync.Mutex // guards the following block + started bool + + statsMu sync.Mutex // guards the following block + stats *BulkProcessorStats +} + +func newBulkProcessor( + client *Client, + beforeFn BulkBeforeFunc, + afterFn BulkAfterFunc, + name string, + numWorkers int, + bulkActions int, + bulkSize int, + flushInterval time.Duration, + wantStats bool, + initialTimeout time.Duration, + maxTimeout time.Duration) *BulkProcessor { + return &BulkProcessor{ + c: client, + beforeFn: beforeFn, + afterFn: afterFn, + name: name, + numWorkers: numWorkers, + bulkActions: bulkActions, + bulkSize: bulkSize, + flushInterval: flushInterval, + wantStats: wantStats, + initialTimeout: initialTimeout, + maxTimeout: maxTimeout, + } +} + +// Start starts the bulk processor. If the processor is already started, +// nil is returned. +func (p *BulkProcessor) Start() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + if p.started { + return nil + } + + // We must have at least one worker. + if p.numWorkers < 1 { + p.numWorkers = 1 + } + + p.requestsC = make(chan BulkableRequest) + p.executionId = 0 + p.stats = newBulkProcessorStats(p.numWorkers) + + // Create and start up workers. + p.workers = make([]*bulkWorker, p.numWorkers) + for i := 0; i < p.numWorkers; i++ { + p.workerWg.Add(1) + p.workers[i] = newBulkWorker(p, i) + go p.workers[i].work() + } + + // Start the ticker for flush (if enabled) + if int64(p.flushInterval) > 0 { + p.flusherStopC = make(chan struct{}) + go p.flusher(p.flushInterval) + } + + p.started = true + + return nil +} + +// Stop is an alias for Close. +func (p *BulkProcessor) Stop() error { + return p.Close() +} + +// Close stops the bulk processor previously started with Do. +// If it is already stopped, this is a no-op and nil is returned. +// +// By implementing Close, BulkProcessor implements the io.Closer interface. +func (p *BulkProcessor) Close() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + // Already stopped? Do nothing. + if !p.started { + return nil + } + + // Stop flusher (if enabled) + if p.flusherStopC != nil { + p.flusherStopC <- struct{}{} + <-p.flusherStopC + close(p.flusherStopC) + p.flusherStopC = nil + } + + // Stop all workers. + close(p.requestsC) + p.workerWg.Wait() + + p.started = false + + return nil +} + +// Stats returns the latest bulk processor statistics. +// Collecting stats must be enabled first by calling Stats(true) on +// the service that created this processor. +func (p *BulkProcessor) Stats() BulkProcessorStats { + p.statsMu.Lock() + defer p.statsMu.Unlock() + return *p.stats +} + +// Add adds a single request to commit by the BulkProcessorService. +// +// The caller is responsible for setting the index and type on the request. +func (p *BulkProcessor) Add(request BulkableRequest) { + p.requestsC <- request +} + +// Flush manually asks all workers to commit their outstanding requests. +// It returns only when all workers acknowledge completion. 
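+// A typical use (sketch) is to call Flush before reading Stats, so that all
+// queued requests have been committed and are reflected in the statistics.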
+func (p *BulkProcessor) Flush() error { + p.statsMu.Lock() + p.stats.Flushed++ + p.statsMu.Unlock() + + for _, w := range p.workers { + w.flushC <- struct{}{} + <-w.flushAckC // wait for completion + } + return nil +} + +// flusher is a single goroutine that periodically asks all workers to +// commit their outstanding bulk requests. It is only started if +// FlushInterval is greater than 0. +func (p *BulkProcessor) flusher(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: // Periodic flush + p.Flush() // TODO swallow errors here? + + case <-p.flusherStopC: + p.flusherStopC <- struct{}{} + return + } + } +} + +// -- Bulk Worker -- + +// bulkWorker encapsulates a single worker, running in a goroutine, +// receiving bulk requests and eventually committing them to Elasticsearch. +// It is strongly bound to a BulkProcessor. +type bulkWorker struct { + p *BulkProcessor + i int + bulkActions int + bulkSize int + service *BulkService + flushC chan struct{} + flushAckC chan struct{} +} + +// newBulkWorker creates a new bulkWorker instance. +func newBulkWorker(p *BulkProcessor, i int) *bulkWorker { + return &bulkWorker{ + p: p, + i: i, + bulkActions: p.bulkActions, + bulkSize: p.bulkSize, + service: NewBulkService(p.c), + flushC: make(chan struct{}), + flushAckC: make(chan struct{}), + } +} + +// work waits for bulk requests and manual flush calls on the respective +// channels and is invoked as a goroutine when the bulk processor is started. +func (w *bulkWorker) work() { + defer func() { + w.p.workerWg.Done() + close(w.flushAckC) + close(w.flushC) + }() + + var stop bool + for !stop { + select { + case req, open := <-w.p.requestsC: + if open { + // Received a new request + w.service.Add(req) + if w.commitRequired() { + w.commit() // TODO swallow errors here? + } + } else { + // Channel closed: Stop. + stop = true + if w.service.NumberOfActions() > 0 { + w.commit() // TODO swallow errors here? + } + } + + case <-w.flushC: + // Commit outstanding requests + if w.service.NumberOfActions() > 0 { + w.commit() // TODO swallow errors here? + } + w.flushAckC <- struct{}{} + } + } +} + +// commit commits the bulk requests in the given service, +// invoking callbacks as specified. 
+func (w *bulkWorker) commit() error { + var res *BulkResponse + + // commitFunc will commit bulk requests and, on failure, be retried + // via exponential backoff + commitFunc := func() error { + var err error + res, err = w.service.Do() + return err + } + // notifyFunc will be called if retry fails + notifyFunc := func(err error, d time.Duration) { + w.p.c.errorf("elastic: bulk processor %q failed but will retry in %v: %v", w.p.name, d, err) + } + + id := atomic.AddInt64(&w.p.executionId, 1) + + // Update # documents in queue before eventual retries + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + } + w.p.statsMu.Unlock() + + // Invoke before callback + if w.p.beforeFn != nil { + w.p.beforeFn(id, w.service.requests) + } + + // Commit bulk requests + policy := backoff.NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout).SendStop(true) + err := backoff.RetryNotify(commitFunc, policy, notifyFunc) + w.updateStats(res) + if err != nil { + w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err) + } + + // Invoke after callback + if w.p.afterFn != nil { + w.p.afterFn(id, w.service.requests, res, err) + } + + return err +} + +func (w *bulkWorker) updateStats(res *BulkResponse) { + // Update stats + if res != nil { + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Committed++ + if res != nil { + w.p.stats.Indexed += int64(len(res.Indexed())) + w.p.stats.Created += int64(len(res.Created())) + w.p.stats.Updated += int64(len(res.Updated())) + w.p.stats.Deleted += int64(len(res.Deleted())) + w.p.stats.Succeeded += int64(len(res.Succeeded())) + w.p.stats.Failed += int64(len(res.Failed())) + } + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond + } + w.p.statsMu.Unlock() + } +} + +// commitRequired returns true if the service has to commit its +// bulk requests. This can be either because the number of actions +// or the estimated size in bytes is larger than specified in the +// BulkProcessorService. +func (w *bulkWorker) commitRequired() bool { + if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions { + return true + } + if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) { + return true + } + return false +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_processor_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_processor_test.go new file mode 100644 index 000000000..645617b4d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_processor_test.go @@ -0,0 +1,406 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "fmt" + "math/rand" + "sync/atomic" + "testing" + "time" +) + +func TestBulkProcessorDefaults(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + p := client.BulkProcessor() + if p == nil { + t.Fatalf("expected BulkProcessorService; got: %v", p) + } + if got, want := p.name, ""; got != want { + t.Errorf("expected %q; got: %q", want, got) + } + if got, want := p.numWorkers, 1; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.bulkActions, 1000; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.bulkSize, 5*1024*1024; got != want { + t.Errorf("expected %d; got: %d", want, got) + } + if got, want := p.flushInterval, time.Duration(0); got != want { + t.Errorf("expected %v; got: %v", want, got) + } + if got, want := p.wantStats, false; got != want { + t.Errorf("expected %v; got: %v", want, got) + } +} + +func TestBulkProcessorCommitOnBulkActions(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Actions-1"). + Workers(1). + BulkActions(100). + BulkSize(-1), + ) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Actions-2"). + Workers(2). + BulkActions(100). + BulkSize(-1), + ) +} + +func TestBulkProcessorCommitOnBulkSize(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Size-1"). + Workers(1). + BulkActions(-1). + BulkSize(64*1024), + ) + + testBulkProcessor(t, + 10000, + client.BulkProcessor(). + Name("Size-2"). + Workers(2). + BulkActions(-1). + BulkSize(64*1024), + ) +} + +func TestBulkProcessorBasedOnFlushInterval(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + } + + svc := client.BulkProcessor(). + Name("FlushInterval-1"). + Workers(2). + BulkActions(-1). + BulkSize(-1). + FlushInterval(1 * time.Second). + Before(beforeFn). + After(afterFn) + + p, err := svc.Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 1000 // low-enough number that flush should be invoked + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. 
%s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should flush at least once + time.Sleep(2 * time.Second) + + err = p.Close() + if err != nil { + t.Fatal(err) + } + + if p.stats.Flushed == 0 { + t.Errorf("expected at least 1 flush; got: %d", p.stats.Flushed) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +func TestBulkProcessorClose(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + } + + p, err := client.BulkProcessor(). + Name("FlushInterval-1"). + Workers(2). + BulkActions(-1). + BulkSize(-1). + FlushInterval(30 * time.Second). // 30 seconds to flush + Before(beforeFn).After(afterFn). + Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 1000 // low-enough number that flush should be invoked + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should not flush because 30s > 1s + time.Sleep(1 * time.Second) + + // Close should flush + err = p.Close() + if err != nil { + t.Fatal(err) + } + + if p.stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", p.stats.Flushed) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +func TestBulkProcessorFlush(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) + client := setupTestClientAndCreateIndex(t) + + p, err := client.BulkProcessor(). + Name("ManualFlush"). + Workers(10). + BulkActions(-1). 
+ BulkSize(-1). + FlushInterval(30 * time.Second). // 30 seconds to flush + Stats(true). + Do() + if err != nil { + t.Fatal(err) + } + + const numDocs = 100 + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + // Should not flush because 30s > 1s + time.Sleep(1 * time.Second) + + // No flush yet + stats := p.Stats() + if stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", p.stats.Flushed) + } + + // Manual flush + err = p.Flush() + if err != nil { + t.Fatal(err) + } + + time.Sleep(1 * time.Second) + + // Now flushed + stats = p.Stats() + if got, want := p.stats.Flushed, int64(1); got != want { + t.Errorf("expected %d flush; got: %d", want, got) + } + + // Close should not start another flush + err = p.Close() + if err != nil { + t.Fatal(err) + } + + // Still 1 flush + stats = p.Stats() + if got, want := p.stats.Flushed, int64(1); got != want { + t.Errorf("expected %d flush; got: %d", want, got) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} + +// -- Helper -- + +func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) { + var beforeRequests int64 + var befores int64 + var afters int64 + var failures int64 + + beforeFn := func(executionId int64, requests []BulkableRequest) { + atomic.AddInt64(&beforeRequests, int64(len(requests))) + atomic.AddInt64(&befores, 1) + } + afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { + atomic.AddInt64(&afters, 1) + if err != nil { + atomic.AddInt64(&failures, 1) + } + } + + p, err := svc.Before(beforeFn).After(afterFn).Stats(true).Do() + if err != nil { + t.Fatal(err) + } + + for i := 1; i <= numDocs; i++ { + tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. 
%s", i, randomString(1+rand.Intn(63)))} + request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) + p.Add(request) + } + + err = p.Close() + if err != nil { + t.Fatal(err) + } + + stats := p.Stats() + + if stats.Flushed != 0 { + t.Errorf("expected no flush; got: %d", stats.Flushed) + } + if stats.Committed <= 0 { + t.Errorf("expected committed > %d; got: %d", 0, stats.Committed) + } + if got, want := stats.Indexed, int64(numDocs); got != want { + t.Errorf("expected indexed = %d; got: %d", want, got) + } + if got, want := stats.Created, int64(0); got != want { + t.Errorf("expected created = %d; got: %d", want, got) + } + if got, want := stats.Updated, int64(0); got != want { + t.Errorf("expected updated = %d; got: %d", want, got) + } + if got, want := stats.Deleted, int64(0); got != want { + t.Errorf("expected deleted = %d; got: %d", want, got) + } + if got, want := stats.Succeeded, int64(numDocs); got != want { + t.Errorf("expected succeeded = %d; got: %d", want, got) + } + if got, want := stats.Failed, int64(0); got != want { + t.Errorf("expected failed = %d; got: %d", want, got) + } + if got, want := beforeRequests, int64(numDocs); got != want { + t.Errorf("expected %d requests to before callback; got: %d", want, got) + } + if befores == 0 { + t.Error("expected at least 1 call to before callback") + } + if afters == 0 { + t.Error("expected at least 1 call to after callback") + } + if failures != 0 { + t.Errorf("expected 0 calls to failure callback; got: %d", failures) + } + + // Check number of documents that were bulk indexed + _, err = p.c.Flush(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err := p.c.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != int64(numDocs) { + t.Fatalf("expected %d documents; got: %d", numDocs, count) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_request.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_request.go new file mode 100644 index 000000000..315b535ca --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_request.go @@ -0,0 +1,17 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// -- Bulkable request (index/update/delete) -- + +// Generic interface to bulkable requests. +type BulkableRequest interface { + fmt.Stringer + Source() ([]string, error) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_test.go new file mode 100644 index 000000000..7ce9053c8 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_test.go @@ -0,0 +1,463 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBulk(t *testing.T) { + //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. 
Yeah."} + + index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) + index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) + delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + + if bulkRequest.NumberOfActions() != 3 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions()) + } + + bulkResponse, err := bulkRequest.Do() + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + + if bulkRequest.NumberOfActions() != 0 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) + } + + // Document with Id="1" should not exist + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if exists { + t.Errorf("expected exists %v; got %v", false, exists) + } + + // Document with Id="2" should exist + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do() + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("expected exists %v; got %v", true, exists) + } + + // Update + updateDoc := struct { + Retweets int `json:"retweets"` + }{ + 42, + } + update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc) + bulkRequest = client.Bulk() + bulkRequest = bulkRequest.Add(update1Req) + + if bulkRequest.NumberOfActions() != 1 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions()) + } + + bulkResponse, err = bulkRequest.Do() + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + + if bulkRequest.NumberOfActions() != 0 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) + } + + // Document with Id="1" should have a retweets count of 42 + doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do() + if err != nil { + t.Fatal(err) + } + if doc == nil { + t.Fatal("expected doc to be != nil; got nil") + } + if !doc.Found { + t.Fatalf("expected doc to be found; got found = %v", doc.Found) + } + if doc.Source == nil { + t.Fatal("expected doc source to be != nil; got nil") + } + var updatedTweet tweet + err = json.Unmarshal(*doc.Source, &updatedTweet) + if err != nil { + t.Fatal(err) + } + if updatedTweet.Retweets != 42 { + t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets) + } + + // Update with script + update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). + RetryOnConflict(3). 
+ Script(NewScript("ctx._source.retweets += v").Param("v", 1))
+ bulkRequest = client.Bulk()
+ bulkRequest = bulkRequest.Add(update2Req)
+ if bulkRequest.NumberOfActions() != 1 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
+ }
+ bulkResponse, err = bulkRequest.Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ if bulkRequest.NumberOfActions() != 0 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+ }
+
+ // Document with Id="2" should have a retweets count of 43
+ doc, err = client.Get().Index(testIndexName).Type("tweet").Id("2").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if doc == nil {
+ t.Fatal("expected doc to be != nil; got nil")
+ }
+ if !doc.Found {
+ t.Fatalf("expected doc to be found; got found = %v", doc.Found)
+ }
+ if doc.Source == nil {
+ t.Fatal("expected doc source to be != nil; got nil")
+ }
+ err = json.Unmarshal(*doc.Source, &updatedTweet)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if updatedTweet.Retweets != 43 {
+ t.Errorf("expected updated tweet retweets = %v; got %v", 43, updatedTweet.Retweets)
+ }
+}
+
+func TestBulkWithIndexSetOnClient(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+
+ bulkRequest := client.Bulk().Index(testIndexName).Type("tweet")
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+
+ if bulkRequest.NumberOfActions() != 3 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+ }
+
+ bulkResponse, err := bulkRequest.Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ // Document with Id="1" should not exist
+ exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+
+ // Document with Id="2" should exist
+ exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+}
+
+func TestBulkRequestsSerialization(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
+ Doc(struct { + Retweets int `json:"retweets"` + }{ + Retweets: 42, + }) + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + bulkRequest = bulkRequest.Add(update2Req) + + if bulkRequest.NumberOfActions() != 4 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions()) + } + + expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}} +{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"} +{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}} +{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"} +{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}} +{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}} +{"doc":{"retweets":42}} +` + got, err := bulkRequest.bodyAsString() + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } + + // Run the bulk request + bulkResponse, err := bulkRequest.Do() + if err != nil { + t.Fatal(err) + } + if bulkResponse == nil { + t.Errorf("expected bulkResponse to be != nil; got nil") + } + if bulkResponse.Took == 0 { + t.Errorf("expected took to be > 0; got %d", bulkResponse.Took) + } + if bulkResponse.Errors { + t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors) + } + if len(bulkResponse.Items) != 4 { + t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items)) + } + + // Indexed actions + indexed := bulkResponse.Indexed() + if indexed == nil { + t.Fatal("expected indexed to be != nil; got nil") + } + if len(indexed) != 1 { + t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed)) + } + if indexed[0].Id != "1" { + t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id) + } + if indexed[0].Status != 201 { + t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status) + } + + // Created actions + created := bulkResponse.Created() + if created == nil { + t.Fatal("expected created to be != nil; got nil") + } + if len(created) != 1 { + t.Fatalf("expected len(created) == %d; got %d", 1, len(created)) + } + if created[0].Id != "2" { + t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id) + } + if created[0].Status != 201 { + t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status) + } + + // Deleted actions + deleted := bulkResponse.Deleted() + if deleted == nil { + t.Fatal("expected deleted to be != nil; got nil") + } + if len(deleted) != 1 { + t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted)) + } + if deleted[0].Id != "1" { + t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id) + } + if deleted[0].Status != 200 { + t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status) + } + if !deleted[0].Found { + t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found) + } + + // Updated actions + updated := bulkResponse.Updated() + if updated == nil { + t.Fatal("expected updated to be != nil; got nil") + } + if len(updated) != 1 { + t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated)) + } + if updated[0].Id != "2" { + t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id) + } + if updated[0].Status != 200 { + t.Errorf("expected updated[0].Status == %d; 
got %d", 200, updated[0].Status) + } + if updated[0].Version != 2 { + t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version) + } + + // Succeeded actions + succeeded := bulkResponse.Succeeded() + if succeeded == nil { + t.Fatal("expected succeeded to be != nil; got nil") + } + if len(succeeded) != 4 { + t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded)) + } + + // ById + id1Results := bulkResponse.ById("1") + if id1Results == nil { + t.Fatal("expected id1Results to be != nil; got nil") + } + if len(id1Results) != 2 { + t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results)) + } + if id1Results[0].Id != "1" { + t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id) + } + if id1Results[0].Status != 201 { + t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status) + } + if id1Results[0].Version != 1 { + t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version) + } + if id1Results[1].Id != "1" { + t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id) + } + if id1Results[1].Status != 200 { + t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status) + } + if id1Results[1].Version != 2 { + t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version) + } +} + +func TestFailedBulkRequests(t *testing.T) { + js := `{ + "took" : 2, + "errors" : true, + "items" : [ { + "index" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "1", + "_version" : 1, + "status" : 201 + } + }, { + "create" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "2", + "_version" : 1, + "status" : 423, + "error" : { + "type":"routing_missing_exception", + "reason":"routing is required for [elastic-test2]/[comment]/[1]" + } + } + }, { + "delete" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "1", + "_version" : 2, + "status" : 404, + "found" : false + } + }, { + "update" : { + "_index" : "elastic-test", + "_type" : "tweet", + "_id" : "2", + "_version" : 2, + "status" : 200 + } + } ] +}` + + var resp BulkResponse + err := json.Unmarshal([]byte(js), &resp) + if err != nil { + t.Fatal(err) + } + failed := resp.Failed() + if len(failed) != 2 { + t.Errorf("expected %d failed items; got: %d", 2, len(failed)) + } +} + +func TestBulkEstimatedSizeInBytes(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} + + index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) + index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) + delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") + update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). + Doc(struct { + Retweets int `json:"retweets"` + }{ + Retweets: 42, + }) + + bulkRequest := client.Bulk() + bulkRequest = bulkRequest.Add(index1Req) + bulkRequest = bulkRequest.Add(index2Req) + bulkRequest = bulkRequest.Add(delete1Req) + bulkRequest = bulkRequest.Add(update2Req) + + if bulkRequest.NumberOfActions() != 4 { + t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions()) + } + + // The estimated size of the bulk request in bytes must be at least + // the length of the body request. 
+ raw, err := bulkRequest.bodyAsString() + if err != nil { + t.Fatal(err) + } + rawlen := int64(len([]byte(raw))) + + if got, want := bulkRequest.EstimatedSizeInBytes(), rawlen; got < want { + t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got) + } + + // Reset should also reset the calculated estimated byte size + bulkRequest.reset() + + if got, want := bulkRequest.EstimatedSizeInBytes(), int64(0); got != want { + t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_update_request.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_update_request.go new file mode 100644 index 000000000..5adef7111 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_update_request.go @@ -0,0 +1,219 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Bulk request to update document in Elasticsearch. +type BulkUpdateRequest struct { + BulkableRequest + index string + typ string + id string + + routing string + parent string + script *Script + version int64 // default is MATCH_ANY + versionType string // default is "internal" + retryOnConflict *int + refresh *bool + upsert interface{} + docAsUpsert *bool + doc interface{} + ttl int64 + timestamp string +} + +func NewBulkUpdateRequest() *BulkUpdateRequest { + return &BulkUpdateRequest{} +} + +func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest { + r.index = index + return r +} + +func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest { + r.typ = typ + return r +} + +func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest { + r.id = id + return r +} + +func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest { + r.routing = routing + return r +} + +func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest { + r.parent = parent + return r +} + +func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest { + r.script = script + return r +} + +func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest { + r.retryOnConflict = &retryOnConflict + return r +} + +func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest { + r.version = version + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". 
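+//
+// Editor's usage sketch (not part of the upstream sources; the index,
+// type, and values are made up):
+//
+//   req := NewBulkUpdateRequest().Index("idx").Type("tweet").Id("1").
+//     Version(5).VersionType("external").
+//     Doc(map[string]interface{}{"retweets": 1})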
+func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest { + r.versionType = versionType + return r +} + +func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest { + r.refresh = &refresh + return r +} + +func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest { + r.doc = doc + return r +} + +func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest { + r.docAsUpsert = &docAsUpsert + return r +} + +func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest { + r.upsert = doc + return r +} + +func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest { + r.ttl = ttl + return r +} + +func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest { + r.timestamp = timestamp + return r +} + +func (r *BulkUpdateRequest) String() string { + lines, err := r.Source() + if err == nil { + return strings.Join(lines, "\n") + } + return fmt.Sprintf("error: %v", err) +} + +func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) { + switch t := data.(type) { + default: + body, err := json.Marshal(data) + if err != nil { + return "", err + } + return string(body), nil + case json.RawMessage: + return string(t), nil + case *json.RawMessage: + return string(*t), nil + case string: + return t, nil + case *string: + return *t, nil + } +} + +func (r BulkUpdateRequest) Source() ([]string, error) { + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "doc" : { "field1" : "value1", ... } } + // or + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "script" : { ... } } + + lines := make([]string, 2) + + // "update" ... + command := make(map[string]interface{}) + updateCommand := make(map[string]interface{}) + if r.index != "" { + updateCommand["_index"] = r.index + } + if r.typ != "" { + updateCommand["_type"] = r.typ + } + if r.id != "" { + updateCommand["_id"] = r.id + } + if r.routing != "" { + updateCommand["_routing"] = r.routing + } + if r.parent != "" { + updateCommand["_parent"] = r.parent + } + if r.timestamp != "" { + updateCommand["_timestamp"] = r.timestamp + } + if r.ttl > 0 { + updateCommand["_ttl"] = r.ttl + } + if r.version > 0 { + updateCommand["_version"] = r.version + } + if r.versionType != "" { + updateCommand["_version_type"] = r.versionType + } + if r.refresh != nil { + updateCommand["refresh"] = *r.refresh + } + if r.retryOnConflict != nil { + updateCommand["_retry_on_conflict"] = *r.retryOnConflict + } + if r.upsert != nil { + updateCommand["upsert"] = r.upsert + } + command["update"] = updateCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // 2nd line: {"doc" : { ... 
}} or {"script": {...}} + source := make(map[string]interface{}) + if r.docAsUpsert != nil { + source["doc_as_upsert"] = *r.docAsUpsert + } + if r.doc != nil { + // {"doc":{...}} + source["doc"] = r.doc + } else if r.script != nil { + // {"script":...} + src, err := r.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + lines[1], err = r.getSourceAsString(source) + if err != nil { + return nil, err + } + + return lines, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_update_request_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_update_request_test.go new file mode 100644 index 000000000..75c5b6d7f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/bulk_update_request_test.go @@ -0,0 +1,77 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestBulkUpdateRequestSerialization(t *testing.T) { + tests := []struct { + Request BulkableRequest + Expected []string + }{ + // #0 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`, + `{"doc":{"counter":42}}`, + }, + }, + // #1 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1"). + RetryOnConflict(3). + DocAsUpsert(true). + Doc(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`, + `{"doc":{"counter":42},"doc_as_upsert":true}`, + }, + }, + // #2 + { + Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1"). + RetryOnConflict(3). + Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)). + Upsert(struct { + Counter int64 `json:"counter"` + }{ + Counter: 42, + }), + Expected: []string{ + `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet","upsert":{"counter":42}}}`, + `{"script":{"inline":"ctx._source.retweets += param1","lang":"javascript","params":{"param1":42}}}`, + }, + }, + } + + for i, test := range tests { + lines, err := test.Request.Source() + if err != nil { + t.Fatalf("case #%d: expected no error, got: %v", i, err) + } + if lines == nil { + t.Fatalf("case #%d: expected lines, got nil", i) + } + if len(lines) != len(test.Expected) { + t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) + } + for j, line := range lines { + if line != test.Expected[j] { + t.Errorf("case #%d: expected line #%d to be\n%s\nbut got:\n%s", i, j, test.Expected[j], line) + } + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/canonicalize.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/canonicalize.go new file mode 100644 index 000000000..645930859 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/canonicalize.go @@ -0,0 +1,28 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "net/url" + +// canonicalize takes a list of URLs and returns its canonicalized form, i.e. 
+// remove anything but scheme, userinfo, host, and port. It also removes any
+// trailing slash and skips invalid URLs or URLs that do not use the
+// http or https protocol.
+//
+// Example:
+// http://127.0.0.1:9200/path?query=1 -> http://127.0.0.1:9200
+func canonicalize(rawurls ...string) []string {
+ canonicalized := make([]string, 0)
+ for _, rawurl := range rawurls {
+ u, err := url.Parse(rawurl)
+ if err == nil && (u.Scheme == "http" || u.Scheme == "https") {
+ u.Fragment = ""
+ u.Path = ""
+ u.RawQuery = ""
+ canonicalized = append(canonicalized, u.String())
+ }
+ }
+ return canonicalized
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/canonicalize_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/canonicalize_test.go
new file mode 100644
index 000000000..ada2ff22d
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/canonicalize_test.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestCanonicalize(t *testing.T) {
+ tests := []struct {
+ Input []string
+ Output []string
+ }{
+ {
+ Input: []string{"http://127.0.0.1/"},
+ Output: []string{"http://127.0.0.1"},
+ },
+ {
+ Input: []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"},
+ Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"},
+ },
+ {
+ Input: []string{"http://user:secret@127.0.0.1/path?query=1#fragment"},
+ Output: []string{"http://user:secret@127.0.0.1"},
+ },
+ {
+ Input: []string{"https://somewhere.on.mars:9999/path?query=1#fragment"},
+ Output: []string{"https://somewhere.on.mars:9999"},
+ },
+ }
+
+ for _, test := range tests {
+ got := canonicalize(test.Input...)
+ if !reflect.DeepEqual(got, test.Output) {
+ t.Errorf("expected %v; got: %v", test.Output, got)
+ }
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/clear_scroll.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/clear_scroll.go
new file mode 100644
index 000000000..c57093267
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/clear_scroll.go
@@ -0,0 +1,102 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// ClearScrollService clears one or more scroll contexts by their ids.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api
+// for details.
+type ClearScrollService struct {
+ client *Client
+ pretty bool
+ scrollId []string
+}
+
+// NewClearScrollService creates a new ClearScrollService.
+func NewClearScrollService(client *Client) *ClearScrollService {
+ return &ClearScrollService{
+ client: client,
+ scrollId: make([]string, 0),
+ }
+}
+
+// ScrollId is a list of scroll IDs to clear.
+// Use _all to clear all search contexts.
+func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService {
+ s.scrollId = append(s.scrollId, scrollIds...)
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human readable.
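+//
+// A hedged usage sketch (editor's addition, not upstream code; scrollID is
+// assumed to come from a prior Scroll call):
+//
+//   res, err := client.ClearScroll().ScrollId(scrollID).Pretty(true).Do()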
+func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ClearScrollService) buildURL() (string, url.Values, error) { + // Build URL + path := "/_search/scroll/" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClearScrollService) Validate() error { + var invalid []string + if len(s.scrollId) == 0 { + invalid = append(invalid, "ScrollId") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ClearScrollService) Do() (*ClearScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body := strings.Join(s.scrollId, ",") + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClearScrollResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClearScrollResponse is the response of ClearScrollService.Do. +type ClearScrollResponse struct { +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/clear_scroll_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/clear_scroll_test.go new file mode 100644 index 000000000..bbb659df9 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/clear_scroll_test.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+ _ "net/http"
+ "testing"
+)
+
+func TestClearScroll(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ res, err := client.Scroll(testIndexName).Size(1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Errorf("expected results != nil; got nil")
+ }
+ if res.ScrollId == "" {
+ t.Errorf("expected scrollId in results; got %q", res.ScrollId)
+ }
+
+ // Search should succeed
+ _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Clear scroll id
+ clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if clearScrollRes == nil {
+ t.Error("expected results != nil; got nil")
+ }
+
+ // Search result should fail
+ _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do()
+ if err == nil {
+ t.Fatalf("expected scroll to fail")
+ }
+}
+
+func TestClearScrollValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No scroll id -> fail with error
+ res, err := NewClearScrollService(client).Do()
+ if err == nil {
+ t.Fatalf("expected ClearScroll to fail without scroll ids")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be nil; got: %v", res)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/client.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/client.go
new file mode 100644
index 000000000..a30cee418
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/client.go
@@ -0,0 +1,1551 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/rand"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // Version is the current version of Elastic.
+ Version = "3.0.21"
+
+ // DefaultURL is the default endpoint of Elasticsearch on the local machine.
+ // It is used e.g. when initializing a new Client without a specific URL.
+ DefaultURL = "http://127.0.0.1:9200"
+
+ // DefaultScheme is the default protocol scheme to use when sniffing
+ // the Elasticsearch cluster.
+ DefaultScheme = "http"
+
+ // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
+ DefaultHealthcheckEnabled = true
+
+ // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
+ // for a response from Elasticsearch on startup, i.e. when creating a
+ // client. After the client is started, a shorter timeout is commonly used
+ // (its default is specified in DefaultHealthcheckTimeout).
+ DefaultHealthcheckTimeoutStartup = 5 * time.Second
+
+ // DefaultHealthcheckTimeout specifies the time a running client waits for
+ // a response from Elasticsearch. Notice that the healthcheck timeout
+ // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
+ DefaultHealthcheckTimeout = 1 * time.Second
+
+ // DefaultHealthcheckInterval is the default interval between
+ // two health checks of the nodes in the cluster.
+ DefaultHealthcheckInterval = 60 * time.Second
+
+ // DefaultSnifferEnabled specifies if the sniffer is enabled by default.
+ DefaultSnifferEnabled = true
+
+ // DefaultSnifferInterval is the interval between two sniffing procedures,
+ // i.e. the lookup of all nodes in the cluster and their addition/removal
+ // from the list of actual connections.
+ DefaultSnifferInterval = 15 * time.Minute
+
+ // DefaultSnifferTimeoutStartup is the default timeout for the sniffing
+ // process that is initiated while creating a new client. For subsequent
+ // sniffing processes, DefaultSnifferTimeout is used (by default).
+ DefaultSnifferTimeoutStartup = 5 * time.Second
+
+ // DefaultSnifferTimeout is the default timeout after which the
+ // sniffing process times out. Notice that for the initial sniffing
+ // process, DefaultSnifferTimeoutStartup is used.
+ DefaultSnifferTimeout = 2 * time.Second
+
+ // DefaultMaxRetries is the number of retries for a single request after
+ // which Elastic will give up and return an error. It is zero by default, so
+ // retry is disabled by default.
+ DefaultMaxRetries = 0
+
+ // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending
+ // a GET request with a body.
+ DefaultSendGetBodyAs = "GET"
+
+ // DefaultGzipEnabled specifies if gzip compression is enabled by default.
+ DefaultGzipEnabled = false
+
+ // off is used to disable timeouts.
+ off = -1 * time.Second
+)
+
+var (
+ // ErrNoClient is raised when no Elasticsearch node is available.
+ ErrNoClient = errors.New("no Elasticsearch node available")
+
+ // ErrRetry is raised when a request cannot be executed after the configured
+ // number of retries.
+ ErrRetry = errors.New("cannot connect after several retries")
+
+ // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
+ // didn't return in time.
+ ErrTimeout = errors.New("timeout")
+)
+
+// ClientOptionFunc is a function that configures a Client.
+// It is used in NewClient.
+type ClientOptionFunc func(*Client) error
+
+// Client is an Elasticsearch client. Create one by calling NewClient.
+type Client struct {
+ c *http.Client // net/http Client to use for requests
+
+ connsMu sync.RWMutex // connsMu guards the next block
+ conns []*conn // all connections
+ cindex int // index into conns
+
+ mu sync.RWMutex // guards the next block
+ urls []string // set of URLs passed initially to the client
+ running bool // true if the client's background processes are running
+ errorlog Logger // error log for critical messages
+ infolog Logger // information log for e.g. response times
+ tracelog Logger // trace log for debugging
+ maxRetries int // max. number of retries
+ scheme string // http or https
+ healthcheckEnabled bool // healthchecks enabled or disabled
+ healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
+ healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch
+ healthcheckInterval time.Duration // interval between healthchecks
+ healthcheckStop chan bool // notify healthchecker to stop, and notify back
+ snifferEnabled bool // sniffer enabled or disabled
+ snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup
+ snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API
+ snifferInterval time.Duration // interval between sniffing
+ snifferStop chan bool // notify sniffer to stop, and notify back
+ decoder Decoder // used to decode data sent from Elasticsearch
+ basicAuth bool // indicates whether to send HTTP Basic Auth credentials
+ basicAuthUsername string // username for HTTP Basic Auth
+ basicAuthPassword string // password for HTTP Basic Auth
+ sendGetBodyAs string // override for when sending a GET with a body
+ requiredPlugins []string // list of required plugins
+ gzipEnabled bool // gzip compression enabled or disabled (default)
+}
+
+// NewClient creates a new client to work with Elasticsearch.
+//
+// NewClient, by default, is meant to be long-lived and shared across
+// your application. If you need a short-lived client, e.g. for request-scope,
+// consider using NewSimpleClient instead.
+//
+// The caller can configure the new client by passing configuration options
+// to the func.
+//
+// Example:
+//
+// client, err := elastic.NewClient(
+// elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"),
+// elastic.SetMaxRetries(10),
+// elastic.SetBasicAuth("user", "secret"))
+//
+// If no URL is configured, Elastic uses DefaultURL by default.
+//
+// If the sniffer is enabled (the default), the new client then sniffs
+// the cluster via the Nodes Info API
+// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// for passing only URLs of nodes that belong to the same cluster.
+// This sniffing process is run on startup and periodically.
+// Use SnifferInterval to set the interval between two sniffs (default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+//
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval by SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+//
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. When a request fails, Elastic will
+// retry up to a maximum number of retries configured with SetMaxRetries.
+// Retries are disabled by default.
+//
+// If no HttpClient is configured, then http.DefaultClient is used.
+// You can use your own http.Client with some http.Transport for
+// advanced scenarios.
+//
+// An error is also returned when some configuration option is invalid or
+// the new client cannot sniff the cluster (if enabled).
+func NewClient(options ...ClientOptionFunc) (*Client, error) {
+ // Set up the client
+ c := &Client{
+ c: http.DefaultClient,
+ conns: make([]*conn, 0),
+ cindex: -1,
+ scheme: DefaultScheme,
+ decoder: &DefaultDecoder{},
+ maxRetries: DefaultMaxRetries,
+ healthcheckEnabled: DefaultHealthcheckEnabled,
+ healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
+ healthcheckTimeout: DefaultHealthcheckTimeout,
+ healthcheckInterval: DefaultHealthcheckInterval,
+ healthcheckStop: make(chan bool),
+ snifferEnabled: DefaultSnifferEnabled,
+ snifferTimeoutStartup: DefaultSnifferTimeoutStartup,
+ snifferTimeout: DefaultSnifferTimeout,
+ snifferInterval: DefaultSnifferInterval,
+ snifferStop: make(chan bool),
+ sendGetBodyAs: DefaultSendGetBodyAs,
+ gzipEnabled: DefaultGzipEnabled,
+ }
+
+ // Run the options on it
+ for _, option := range options {
+ if err := option(c); err != nil {
+ return nil, err
+ }
+ }
+
+ if len(c.urls) == 0 {
+ c.urls = []string{DefaultURL}
+ }
+ c.urls = canonicalize(c.urls...)
+
+ // Check if we can make a request to any of the specified URLs
+ if c.healthcheckEnabled {
+ if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil {
+ return nil, err
+ }
+ }
+
+ if c.snifferEnabled {
+ // Sniff the cluster initially
+ if err := c.sniff(c.snifferTimeoutStartup); err != nil {
+ return nil, err
+ }
+ } else {
+ // Do not sniff the cluster initially. Use the provided URLs instead.
+ for _, url := range c.urls {
+ c.conns = append(c.conns, newConn(url, url))
+ }
+ }
+
+ if c.healthcheckEnabled {
+ // Perform an initial health check
+ c.healthcheck(c.healthcheckTimeoutStartup, true)
+ }
+ // Ensure that we have at least one connection available
+ if err := c.mustActiveConn(); err != nil {
+ return nil, err
+ }
+
+ // Check the required plugins
+ for _, plugin := range c.requiredPlugins {
+ found, err := c.HasPlugin(plugin)
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ return nil, fmt.Errorf("elastic: plugin %s not found", plugin)
+ }
+ }
+
+ if c.snifferEnabled {
+ go c.sniffer() // periodically update cluster information
+ }
+ if c.healthcheckEnabled {
+ go c.healthchecker() // start goroutine that periodically pings all nodes of the cluster
+ }
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ return c, nil
+}
+
+// NewSimpleClient creates a new short-lived Client that can be used in
+// use cases where you need e.g. one client per request.
+//
+// While NewClient by default sets up e.g. periodic health checks
+// and sniffing for new nodes in separate goroutines, NewSimpleClient does
+// not, and is meant as a simple replacement where you don't need all the
+// heavy lifting of NewClient.
+//
+// NewSimpleClient does the following by default: First, all health checks
+// are disabled, including timeouts and periodic checks. Second, sniffing
+// is disabled, including timeouts and periodic checks. The number of retries
+// is set to 1. NewSimpleClient also does not start any goroutines.
+//
+// Notice that you can still override settings by passing additional options,
+// just like with NewClient.
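+//
+// A minimal sketch (editor's illustration, not upstream code):
+//
+//   client, err := elastic.NewSimpleClient(elastic.SetURL("http://127.0.0.1:9200"))
+//   if err != nil {
+//     // handle error
+//   }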
+func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) { + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + maxRetries: 1, + healthcheckEnabled: false, + healthcheckTimeoutStartup: off, + healthcheckTimeout: off, + healthcheckInterval: off, + healthcheckStop: make(chan bool), + snifferEnabled: false, + snifferTimeoutStartup: off, + snifferTimeout: off, + snifferInterval: off, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// SetHttpClient can be used to specify the http.Client to use when making +// HTTP requests to Elasticsearch. +func SetHttpClient(httpClient *http.Client) ClientOptionFunc { + return func(c *Client) error { + if httpClient != nil { + c.c = httpClient + } else { + c.c = http.DefaultClient + } + return nil + } +} + +// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to +// use when making HTTP requests to Elasticsearch. +func SetBasicAuth(username, password string) ClientOptionFunc { + return func(c *Client) error { + c.basicAuthUsername = username + c.basicAuthPassword = password + c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != "" + return nil + } +} + +// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that +// when sniffing is enabled, these URLs are used to initially sniff the +// cluster on startup. +func SetURL(urls ...string) ClientOptionFunc { + return func(c *Client) error { + switch len(urls) { + case 0: + c.urls = []string{DefaultURL} + default: + c.urls = urls + } + return nil + } +} + +// SetScheme sets the HTTP scheme to look for when sniffing (http or https). +// This is http by default. +func SetScheme(scheme string) ClientOptionFunc { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// SetSniff enables or disables the sniffer (enabled by default). +func SetSniff(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.snifferEnabled = enabled + return nil + } +} + +// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used +// when creating a new client. The default is 5 seconds. Notice that the +// timeout being used for subsequent sniffing processes is set with +// SetSnifferTimeout. +func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeoutStartup = timeout + return nil + } +} + +// SetSnifferTimeout sets the timeout for the sniffer that finds the +// nodes in a cluster. The default is 2 seconds. Notice that the timeout +// used when creating a new client on startup is usually greater and can +// be set with SetSnifferTimeoutStartup. 
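+//
+// For example (an editor's sketch, not upstream code):
+//
+//   client, err := elastic.NewClient(elastic.SetSnifferTimeout(5 * time.Second))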
+func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeout = timeout + return nil + } +} + +// SetSnifferInterval sets the interval between two sniffing processes. +// The default interval is 15 minutes. +func SetSnifferInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferInterval = interval + return nil + } +} + +// SetHealthcheck enables or disables healthchecks (enabled by default). +func SetHealthcheck(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckEnabled = enabled + return nil + } +} + +// SetHealthcheckTimeoutStartup sets the timeout for the initial health check. +// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup). +// Notice that timeouts for subsequent health checks can be modified with +// SetHealthcheckTimeout. +func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeoutStartup = timeout + return nil + } +} + +// SetHealthcheckTimeout sets the timeout for periodic health checks. +// The default timeout is 1 second (see DefaultHealthcheckTimeout). +// Notice that a different (usually larger) timeout is used for the initial +// healthcheck, which is initiated while creating a new client. +// The startup timeout can be modified with SetHealthcheckTimeoutStartup. +func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeout = timeout + return nil + } +} + +// SetHealthcheckInterval sets the interval between two health checks. +// The default interval is 60 seconds. +func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckInterval = interval + return nil + } +} + +// SetMaxRetries sets the maximum number of retries before giving up when +// performing a HTTP request to Elasticsearch. +func SetMaxRetries(maxRetries int) ClientOptionFunc { + return func(c *Client) error { + if maxRetries < 0 { + return errors.New("MaxRetries must be greater than or equal to 0") + } + c.maxRetries = maxRetries + return nil + } +} + +// SetGzip enables or disables gzip compression (disabled by default). +func SetGzip(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.gzipEnabled = enabled + return nil + } +} + +// SetDecoder sets the Decoder to use when decoding data from Elasticsearch. +// DefaultDecoder is used by default. +func SetDecoder(decoder Decoder) ClientOptionFunc { + return func(c *Client) error { + if decoder != nil { + c.decoder = decoder + } else { + c.decoder = &DefaultDecoder{} + } + return nil + } +} + +// SetRequiredPlugins can be used to indicate that some plugins are required +// before a Client will be created. +func SetRequiredPlugins(plugins ...string) ClientOptionFunc { + return func(c *Client) error { + if c.requiredPlugins == nil { + c.requiredPlugins = make([]string, 0) + } + c.requiredPlugins = append(c.requiredPlugins, plugins...) + return nil + } +} + +// SetErrorLog sets the logger for critical messages like nodes joining +// or leaving the cluster or failing requests. It is nil by default. +func SetErrorLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.errorlog = logger + return nil + } +} + +// SetInfoLog sets the logger for informational messages, e.g. requests +// and their response times. It is nil by default. 
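+//
+// A hedged example (editor's addition, not upstream code); a *log.Logger
+// satisfies the Logger interface:
+//
+//   infolog := log.New(os.Stdout, "ELASTIC ", log.LstdFlags)
+//   client, err := elastic.NewClient(elastic.SetInfoLog(infolog))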
+func SetInfoLog(logger Logger) ClientOptionFunc {
+ return func(c *Client) error {
+ c.infolog = logger
+ return nil
+ }
+}
+
+// SetTraceLog specifies the log.Logger to use for output of HTTP requests
+// and responses which is helpful during debugging. It is nil by default.
+func SetTraceLog(logger Logger) ClientOptionFunc {
+ return func(c *Client) error {
+ c.tracelog = logger
+ return nil
+ }
+}
+
+// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request
+// with a body. It is GET by default.
+func SetSendGetBodyAs(httpMethod string) ClientOptionFunc {
+ return func(c *Client) error {
+ c.sendGetBodyAs = httpMethod
+ return nil
+ }
+}
+
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+ c.connsMu.Lock()
+ conns := c.conns
+ c.connsMu.Unlock()
+
+ var buf bytes.Buffer
+ for i, conn := range conns {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(conn.String())
+ }
+ return buf.String()
+}
+
+// IsRunning returns true if the background processes of the client are
+// running, false otherwise.
+func (c *Client) IsRunning() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.running
+}
+
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+//
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+ c.mu.RLock()
+ if c.running {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ if c.snifferEnabled {
+ go c.sniffer()
+ }
+ if c.healthcheckEnabled {
+ go c.healthchecker()
+ }
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ c.infof("elastic: client started")
+}
+
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+//
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() {
+ c.mu.RLock()
+ if !c.running {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ if c.healthcheckEnabled {
+ c.healthcheckStop <- true
+ <-c.healthcheckStop
+ }
+
+ if c.snifferEnabled {
+ c.snifferStop <- true
+ <-c.snifferStop
+ }
+
+ c.mu.Lock()
+ c.running = false
+ c.mu.Unlock()
+
+ c.infof("elastic: client stopped")
+}
+
+// errorf logs to the error log.
+func (c *Client) errorf(format string, args ...interface{}) {
+ if c.errorlog != nil {
+ c.errorlog.Printf(format, args...)
+ }
+}
+
+// infof logs informational messages.
+func (c *Client) infof(format string, args ...interface{}) {
+ if c.infolog != nil {
+ c.infolog.Printf(format, args...)
+ }
+}
+
+// tracef logs to the trace log.
+func (c *Client) tracef(format string, args ...interface{}) {
+ if c.tracelog != nil {
+ c.tracelog.Printf(format, args...)
+ }
+}
+
+// dumpRequest dumps the given HTTP request to the trace log.
+func (c *Client) dumpRequest(r *http.Request) {
+ if c.tracelog != nil {
+ out, err := httputil.DumpRequestOut(r, true)
+ if err == nil {
+ c.tracef("%s\n", string(out))
+ }
+ }
+}
+
+// dumpResponse dumps the given HTTP response to the trace log.
+func (c *Client) dumpResponse(resp *http.Response) {
+ if c.tracelog != nil {
+ out, err := httputil.DumpResponse(resp, true)
+ if err == nil {
+ c.tracef("%s\n", string(out))
+ }
+ }
+}
+
+// sniffer periodically runs sniff.
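+// Editor's note: the loop re-reads snifferTimeout and snifferInterval on
+// every iteration (under the read lock) and exits when a value arrives on
+// snifferStop, echoing a value back so Stop can wait for shutdown.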
+func (c *Client) sniffer() { + for { + c.mu.RLock() + timeout := c.snifferTimeout + ticker := time.After(c.snifferInterval) + c.mu.RUnlock() + + select { + case <-c.snifferStop: + // we are asked to stop, so we signal back that we're stopping now + c.snifferStop <- true + return + case <-ticker: + c.sniff(timeout) + } + } +} + +// sniff uses the Node Info API to return the list of nodes in the cluster. +// It uses the list of URLs passed on startup plus the list of URLs found +// by the preceding sniffing process (if sniffing is enabled). +// +// If sniffing is disabled, this is a no-op. +func (c *Client) sniff(timeout time.Duration) error { + c.mu.RLock() + if !c.snifferEnabled { + c.mu.RUnlock() + return nil + } + + // Use all available URLs provided to sniff the cluster. + urlsMap := make(map[string]bool) + urls := make([]string, 0) + + // Add all URLs provided on startup + for _, url := range c.urls { + urlsMap[url] = true + urls = append(urls, url) + } + c.mu.RUnlock() + + // Add all URLs found by sniffing + c.connsMu.RLock() + for _, conn := range c.conns { + if !conn.IsDead() { + url := conn.URL() + if _, found := urlsMap[url]; !found { + urls = append(urls, url) + } + } + } + c.connsMu.RUnlock() + + if len(urls) == 0 { + return ErrNoClient + } + + // Start sniffing on all found URLs + ch := make(chan []*conn, len(urls)) + for _, url := range urls { + go func(url string) { ch <- c.sniffNode(url) }(url) + } + + // Wait for the results to come back, or the process times out. + for { + select { + case conns := <-ch: + if len(conns) > 0 { + c.updateConns(conns) + return nil + } + case <-time.After(timeout): + // We get here if no cluster responds in time + return ErrNoClient + } + } +} + +// reSniffHostAndPort is used to extract hostname and port from a result +// from a Nodes Info API (example: "inet[/127.0.0.1:9200]"). +var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`) + +// sniffNode sniffs a single node. This method is run as a goroutine +// in sniff. If successful, it returns the list of node URLs extracted +// from the result of calling Nodes Info API. Otherwise, an empty array +// is returned. 
+func (c *Client) sniffNode(url string) []*conn {
+ nodes := make([]*conn, 0)
+
+ // Call the Nodes Info API at /_nodes/http
+ req, err := NewRequest("GET", url+"/_nodes/http")
+ if err != nil {
+ return nodes
+ }
+
+ c.mu.RLock()
+ if c.basicAuth {
+ req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword)
+ }
+ c.mu.RUnlock()
+
+ res, err := c.c.Do((*http.Request)(req))
+ if err != nil {
+ return nodes
+ }
+ if res == nil {
+ return nodes
+ }
+
+ if res.Body != nil {
+ defer res.Body.Close()
+ }
+
+ var info NodesInfoResponse
+ if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
+ if len(info.Nodes) > 0 {
+ switch c.scheme {
+ case "https":
+ for nodeID, node := range info.Nodes {
+ if strings.HasPrefix(node.HTTPSAddress, "inet") {
+ m := reSniffHostAndPort.FindStringSubmatch(node.HTTPSAddress)
+ if len(m) == 3 {
+ url := fmt.Sprintf("https://%s:%s", m[1], m[2])
+ nodes = append(nodes, newConn(nodeID, url))
+ }
+ } else {
+ url := fmt.Sprintf("https://%s", node.HTTPSAddress)
+ nodes = append(nodes, newConn(nodeID, url))
+ }
+ }
+ default:
+ for nodeID, node := range info.Nodes {
+ if strings.HasPrefix(node.HTTPAddress, "inet") {
+ m := reSniffHostAndPort.FindStringSubmatch(node.HTTPAddress)
+ if len(m) == 3 {
+ url := fmt.Sprintf("http://%s:%s", m[1], m[2])
+ nodes = append(nodes, newConn(nodeID, url))
+ }
+ } else {
+ url := fmt.Sprintf("http://%s", node.HTTPAddress)
+ nodes = append(nodes, newConn(nodeID, url))
+ }
+ }
+ }
+ }
+ }
+ return nodes
+}
+
+// updateConns updates the client's connections with new information
+// gathered by a sniff operation.
+func (c *Client) updateConns(conns []*conn) {
+ c.connsMu.Lock()
+
+ newConns := make([]*conn, 0)
+
+ // Build up new connections:
+ // If we find an existing connection, use that (including no. of failures etc.).
+ // If we find a new connection, add it.
+ for _, conn := range conns {
+ var found bool
+ for _, oldConn := range c.conns {
+ if oldConn.NodeID() == conn.NodeID() {
+ // Take over the old connection
+ newConns = append(newConns, oldConn)
+ found = true
+ break
+ }
+ }
+ if !found {
+ // New connection didn't exist, so add it to our list of new conns.
+ c.errorf("elastic: %s joined the cluster", conn.URL())
+ newConns = append(newConns, conn)
+ }
+ }
+
+ c.conns = newConns
+ c.cindex = -1
+ c.connsMu.Unlock()
+}
+
+// healthchecker periodically runs healthcheck.
+func (c *Client) healthchecker() {
+ for {
+ c.mu.RLock()
+ timeout := c.healthcheckTimeout
+ ticker := time.After(c.healthcheckInterval)
+ c.mu.RUnlock()
+
+ select {
+ case <-c.healthcheckStop:
+ // we are asked to stop, so we signal back that we're stopping now
+ c.healthcheckStop <- true
+ return
+ case <-ticker:
+ c.healthcheck(timeout, false)
+ }
+ }
+}
+
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks connections as dead, sets them alive etc.
+// If healthchecks are disabled and force is false, this is a no-op.
+// The timeout specifies how long to wait for a response from Elasticsearch.
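+//
+// Editor's note: each connection is probed with a HEAD request to its base
+// URL (the timeout is also passed along as a query parameter); a 2xx status
+// marks the connection alive, anything else marks it dead.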
+func (c *Client) healthcheck(timeout time.Duration, force bool) { + c.mu.RLock() + if !c.healthcheckEnabled && !force { + c.mu.RUnlock() + return + } + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + c.mu.RUnlock() + + c.connsMu.RLock() + conns := c.conns + c.connsMu.RUnlock() + + timeoutInMillis := int64(timeout / time.Millisecond) + + for _, conn := range conns { + params := make(url.Values) + params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis)) + req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode()) + if err == nil { + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + res, err := c.c.Do((*http.Request)(req)) + if err == nil { + if res.Body != nil { + defer res.Body.Close() + } + if res.StatusCode >= 200 && res.StatusCode < 300 { + conn.MarkAsAlive() + } else { + conn.MarkAsDead() + c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode) + } + } else { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + } + } else { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + } + } +} + +// startupHealthcheck is used at startup to check if the server is available +// at all. +func (c *Client) startupHealthcheck(timeout time.Duration) error { + c.mu.Lock() + urls := c.urls + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + c.mu.Unlock() + + // If we don't get a connection after "timeout", we bail. + start := time.Now() + for { + cl := &http.Client{Timeout: timeout} + for _, url := range urls { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return err + } + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + res, err := cl.Do(req) + if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 { + return nil + } + } + time.Sleep(1 * time.Second) + if time.Now().Sub(start) > timeout { + break + } + } + return ErrNoClient +} + +// next returns the next available connection, or ErrNoClient. +func (c *Client) next() (*conn, error) { + // We do round-robin here. + // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients. + c.connsMu.Lock() + defer c.connsMu.Unlock() + + i := 0 + numConns := len(c.conns) + for { + i += 1 + if i > numConns { + break // we visited all conns: they all seem to be dead + } + c.cindex += 1 + if c.cindex >= numConns { + c.cindex = 0 + } + conn := c.conns[c.cindex] + if !conn.IsDead() { + return conn, nil + } + } + + // We have a deadlock here: All nodes are marked as dead. + // If sniffing is disabled, connections will never be marked alive again. + // So we are marking them as alive--if sniffing is disabled. + // They'll then be picked up in the next call to PerformRequest. + if !c.snifferEnabled { + c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns)) + for _, conn := range c.conns { + conn.MarkAsAlive() + } + } + + // We tried hard, but there is no node available + return nil, ErrNoClient +} + +// mustActiveConn returns nil if there is an active connection, +// otherwise ErrNoClient is returned. +func (c *Client) mustActiveConn() error { + c.connsMu.Lock() + defer c.connsMu.Unlock() + + for _, c := range c.conns { + if !c.IsDead() { + return nil + } + } + return ErrNoClient +} + +// PerformRequest does a HTTP request to Elasticsearch. +// It returns a response and an error on failure. 
+//
+// Optionally, a list of HTTP error codes to ignore can be passed.
+// This is necessary for services that expect e.g. HTTP status 404 as a
+// valid outcome (Exists, IndicesExists, IndicesTypeExists).
+func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) {
+	start := time.Now().UTC()
+
+	c.mu.RLock()
+	timeout := c.healthcheckTimeout
+	retries := c.maxRetries
+	basicAuth := c.basicAuth
+	basicAuthUsername := c.basicAuthUsername
+	basicAuthPassword := c.basicAuthPassword
+	sendGetBodyAs := c.sendGetBodyAs
+	gzipEnabled := c.gzipEnabled
+	c.mu.RUnlock()
+
+	var err error
+	var conn *conn
+	var req *Request
+	var resp *Response
+	var retried bool
+
+	// We wait between retries, using simple exponential back-off.
+	// TODO: Make this configurable, including the jitter.
+	retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+
+	// Change method if sendGetBodyAs is specified.
+	if method == "GET" && body != nil && sendGetBodyAs != "GET" {
+		method = sendGetBodyAs
+	}
+
+	for {
+		pathWithParams := path
+		if len(params) > 0 {
+			pathWithParams += "?" + params.Encode()
+		}
+
+		// Get a connection
+		conn, err = c.next()
+		if err == ErrNoClient {
+			if !retried {
+				// Run a healthcheck, as all connections seem to be dead.
+				// (It is a no-op when healthchecks are disabled and not forced.)
+				c.healthcheck(timeout, false)
+			}
+			retries -= 1
+			if retries <= 0 {
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if err != nil {
+			c.errorf("elastic: cannot get connection from pool")
+			return nil, err
+		}
+
+		req, err = NewRequest(method, conn.URL()+pathWithParams)
+		if err != nil {
+			c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+			return nil, err
+		}
+
+		if basicAuth {
+			req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+		}
+
+		// Set body
+		if body != nil {
+			err = req.SetBody(body, gzipEnabled)
+			if err != nil {
+				c.errorf("elastic: couldn't set body %+v for request: %v", body, err)
+				return nil, err
+			}
+		}
+
+		// Tracing
+		c.dumpRequest((*http.Request)(req))
+
+		// Get response
+		res, err := c.c.Do((*http.Request)(req))
+		if err != nil {
+			retries -= 1
+			if retries <= 0 {
+				c.errorf("elastic: %s is dead", conn.URL())
+				conn.MarkAsDead()
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if res.Body != nil {
+			defer res.Body.Close()
+		}
+
+		// Check for errors
+		if err := checkResponse((*http.Request)(req), res, ignoreErrors...); err != nil {
+			// No retry: the HTTP round-trip succeeded, the error status is
+			// simply reported to the caller.
+			return nil, err
+		}
+
+		// Tracing
+		c.dumpResponse(res)
+
+		// We successfully made a request with this connection
+		conn.MarkAsHealthy()
+
+		resp, err = c.newResponse(res)
+		if err != nil {
+			return nil, err
+		}
+
+		break
+	}
+
+	duration := time.Now().UTC().Sub(start)
+	c.infof("%s %s [status:%d, request:%.3fs]",
+		strings.ToUpper(method),
+		req.URL,
+		resp.StatusCode,
+		float64(int64(duration/time.Millisecond))/1000)
+
+	return resp, nil
+}
+
+// -- Document APIs --
+
+// Index a document.
+func (c *Client) Index() *IndexService {
+	return NewIndexService(c)
+}
+
+// Get a document.
+func (c *Client) Get() *GetService {
+	return NewGetService(c)
+}
+
+// MultiGet retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MgetService { + return NewMgetService(c) +} + +// Mget retrieves multiple documents in one roundtrip. +func (c *Client) Mget() *MgetService { + return NewMgetService(c) +} + +// Delete a document. +func (c *Client) Delete() *DeleteService { + return NewDeleteService(c) +} + +// DeleteByQuery deletes documents as found by a query. +func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService { + return NewDeleteByQueryService(c).Index(indices...) +} + +// Update a document. +func (c *Client) Update() *UpdateService { + return NewUpdateService(c) +} + +// Bulk is the entry point to mass insert/update/delete documents. +func (c *Client) Bulk() *BulkService { + return NewBulkService(c) +} + +// BulkProcessor allows setting up a concurrent processor of bulk requests. +func (c *Client) BulkProcessor() *BulkProcessorService { + return NewBulkProcessorService(c) +} + +// TODO Term Vectors +// TODO Multi termvectors API + +// -- Search APIs -- + +// Search is the entry point for searches. +func (c *Client) Search(indices ...string) *SearchService { + return NewSearchService(c).Index(indices...) +} + +// Suggest returns a service to return suggestions. +func (c *Client) Suggest(indices ...string) *SuggestService { + return NewSuggestService(c).Index(indices...) +} + +// MultiSearch is the entry point for multi searches. +func (c *Client) MultiSearch() *MultiSearchService { + return NewMultiSearchService(c) +} + +// Count documents. +func (c *Client) Count(indices ...string) *CountService { + return NewCountService(c).Index(indices...) +} + +// Explain computes a score explanation for a query and a specific document. +func (c *Client) Explain(index, typ, id string) *ExplainService { + return NewExplainService(c).Index(index).Type(typ).Id(id) +} + +// Percolate allows to send a document and return matching queries. +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html. +func (c *Client) Percolate() *PercolateService { + return NewPercolateService(c) +} + +// TODO Search Template +// TODO Search Shards API +// TODO Search Exists API +// TODO Validate API +// TODO Field Stats API + +// Exists checks if a document exists. +func (c *Client) Exists() *ExistsService { + return NewExistsService(c) +} + +// Scan through documents. Use this to iterate inside a server process +// where the results will be processed without returning them to a client. +func (c *Client) Scan(indices ...string) *ScanService { + return NewScanService(c).Index(indices...) +} + +// Scroll through documents. Use this to efficiently scroll through results +// while returning the results to a client. Use Scan when you don't need +// to return requests to a client (i.e. not paginating via request/response). +func (c *Client) Scroll(indices ...string) *ScrollService { + return NewScrollService(c).Index(indices...) +} + +// ClearScroll can be used to clear search contexts manually. +func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService { + return NewClearScrollService(c).ScrollId(scrollIds...) +} + +// -- Indices APIs -- + +// CreateIndex returns a service to create a new index. +func (c *Client) CreateIndex(name string) *IndicesCreateService { + return NewIndicesCreateService(c).Index(name) +} + +// DeleteIndex returns a service to delete an index. +func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService { + return NewIndicesDeleteService(c).Index(indices) +} + +// IndexExists allows to check if an index exists. 
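+//
+// A short usage sketch, mirroring the cluster-test program added in this
+// patch ("twitter" is a placeholder index name):
+//
+//	exists, err := client.IndexExists("twitter").Do()
+//	if err == nil && !exists {
+//		// create it, e.g. via client.CreateIndex("twitter").Do()
+//	}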
+func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
+	return NewIndicesExistsService(c).Index(indices)
+}
+
+// TypeExists allows to check if one or more types exist in one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+	return NewIndicesExistsTypeService(c)
+}
+
+// IndexStats provides statistics on different operations happening
+// in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+	return NewIndicesStatsService(c).Index(indices...)
+}
+
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *IndicesOpenService {
+	return NewIndicesOpenService(c).Index(name)
+}
+
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *IndicesCloseService {
+	return NewIndicesCloseService(c).Index(name)
+}
+
+// IndexGet retrieves information about one or more indices.
+// IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet(indices ...string) *IndicesGetService {
+	return NewIndicesGetService(c).Index(indices...)
+}
+
+// IndexGetSettings retrieves settings of all, one or more indices.
+func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService {
+	return NewIndicesGetSettingsService(c).Index(indices...)
+}
+
+// IndexPutSettings sets settings for all, one or more indices.
+func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService {
+	return NewIndicesPutSettingsService(c).Index(indices...)
+}
+
+// Optimize asks Elasticsearch to optimize one or more indices.
+// Optimize is deprecated as of Elasticsearch 2.1 and replaced by Forcemerge.
+func (c *Client) Optimize(indices ...string) *OptimizeService {
+	return NewOptimizeService(c).Index(indices...)
+}
+
+// Forcemerge optimizes one or more indices.
+// It replaces the deprecated Optimize API.
+func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService {
+	return NewIndicesForcemergeService(c).Index(indices...)
+}
+
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+	return NewRefreshService(c).Index(indices...)
+}
+
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush(indices ...string) *IndicesFlushService {
+	return NewIndicesFlushService(c).Index(indices...)
+}
+
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+	return NewAliasService(c)
+}
+
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+	return NewAliasesService(c)
+}
+
+// GetTemplate gets a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) GetTemplate() *GetTemplateService {
+	return NewGetTemplateService(c)
+}
+
+// PutTemplate creates or updates a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) PutTemplate() *PutTemplateService {
+	return NewPutTemplateService(c)
+}
+
+// DeleteTemplate deletes a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+	return NewDeleteTemplateService(c)
+}
+
+// IndexGetTemplate gets an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+	return NewIndicesGetTemplateService(c).Name(names...)
+}
+
+// IndexTemplateExists checks if an index template exists.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+	return NewIndicesExistsTemplateService(c).Name(name)
+}
+
+// IndexPutTemplate creates or updates an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+	return NewIndicesPutTemplateService(c).Name(name)
+}
+
+// IndexDeleteTemplate deletes an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+	return NewIndicesDeleteTemplateService(c).Name(name)
+}
+
+// GetMapping gets a mapping.
+func (c *Client) GetMapping() *IndicesGetMappingService {
+	return NewIndicesGetMappingService(c)
+}
+
+// PutMapping registers a mapping.
+func (c *Client) PutMapping() *IndicesPutMappingService {
+	return NewIndicesPutMappingService(c)
+}
+
+// GetWarmer gets one or more warmers by name.
+func (c *Client) GetWarmer() *IndicesGetWarmerService {
+	return NewIndicesGetWarmerService(c)
+}
+
+// PutWarmer registers a warmer.
+func (c *Client) PutWarmer() *IndicesPutWarmerService {
+	return NewIndicesPutWarmerService(c)
+}
+
+// DeleteWarmer deletes one or more warmers.
+func (c *Client) DeleteWarmer() *IndicesDeleteWarmerService {
+	return NewIndicesDeleteWarmerService(c)
+}
+
+// -- cat APIs --
+
+// TODO cat aliases
+// TODO cat allocation
+// TODO cat count
+// TODO cat fielddata
+// TODO cat health
+// TODO cat indices
+// TODO cat master
+// TODO cat nodes
+// TODO cat pending tasks
+// TODO cat plugins
+// TODO cat recovery
+// TODO cat thread pool
+// TODO cat shards
+// TODO cat segments
+
+// -- Cluster APIs --
+
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+	return NewClusterHealthService(c)
+}
+
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+	return NewClusterStateService(c)
+}
+
+// ClusterStats retrieves cluster statistics.
+func (c *Client) ClusterStats() *ClusterStatsService {
+	return NewClusterStatsService(c)
+}
+
+// NodesInfo retrieves information about one, several, or all of the
+// cluster's nodes.
+func (c *Client) NodesInfo() *NodesInfoService {
+	return NewNodesInfoService(c)
+}
+
+// TODO Pending cluster tasks
+// TODO Cluster Reroute
+// TODO Cluster Update Settings
+// TODO Nodes Stats
+// TODO Nodes hot_threads
+
+// -- Snapshot and Restore --
+
+// TODO Snapshot Create
+// TODO Snapshot Create Repository
+// TODO Snapshot Delete
+// TODO Snapshot Delete Repository
+// TODO Snapshot Get
+// TODO Snapshot Get Repository
+// TODO Snapshot Restore
+// TODO Snapshot Status
+// TODO Snapshot Verify Repository
+
+// -- Helpers and shortcuts --
+
+// ElasticsearchVersion returns the version number of Elasticsearch
+// running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+	res, _, err := c.Ping(url).Do()
+	if err != nil {
+		return "", err
+	}
+	return res.Version.Number, nil
+}
+
+// IndexNames returns the names of all indices in the cluster.
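+//
+// A brief sketch (TestIndexNames in this patch issues the same call):
+//
+//	names, err := client.IndexNames()
+//	if err == nil {
+//		for _, name := range names {
+//			fmt.Println(name)
+//		}
+//	}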
+func (c *Client) IndexNames() ([]string, error) {
+	res, err := c.IndexGetSettings().Index("_all").Do()
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for name := range res {
+		names = append(names, name)
+	}
+	return names, nil
+}
+
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+//
+// Notice that you need to specify a URL here explicitly.
+func (c *Client) Ping(url string) *PingService {
+	return NewPingService(c).URL(url)
+}
+
+// Reindex returns a service that will reindex documents from a source
+// index into a target index. See
+// http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
+	return NewReindexer(c, sourceIndex, CopyToTargetIndex(targetIndex))
+}
+
+// WaitForStatus waits for the cluster to have the given status.
+// This is a shortcut method for the ClusterHealth service.
+//
+// WaitForStatus waits for the specified timeout, e.g. "10s".
+// If the cluster will have the given state within the timeout, nil is returned.
+// If the request timed out, ErrTimeout is returned.
+func (c *Client) WaitForStatus(status string, timeout string) error {
+	health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do()
+	if err != nil {
+		return err
+	}
+	if health.TimedOut {
+		return ErrTimeout
+	}
+	return nil
+}
+
+// WaitForGreenStatus waits for the cluster to have the "green" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForGreenStatus(timeout string) error {
+	return c.WaitForStatus("green", timeout)
+}
+
+// WaitForYellowStatus waits for the cluster to have the "yellow" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForYellowStatus(timeout string) error {
+	return c.WaitForStatus("yellow", timeout)
+}
+
+// TermVectors returns information and statistics on terms in the fields
+// of a particular document.
+func (c *Client) TermVectors(index, typ string) *TermvectorsService {
+	builder := NewTermvectorsService(c)
+	builder = builder.Index(index).Type(typ)
+	return builder
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/client_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/client_test.go
new file mode 100644
index 000000000..7bdcd2287
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/client_test.go
@@ -0,0 +1,899 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"log"
+	"net/http"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+)
+
+func findConn(s string, slice ...*conn) (int, bool) {
+	for i, t := range slice {
+		if s == t.URL() {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+// -- NewClient --
+
+func TestClientDefaults(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if client.healthcheckEnabled != true {
+		t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled)
+	}
+	if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup {
+		t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup)
+	}
+	if client.healthcheckTimeout != DefaultHealthcheckTimeout {
+		t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout)
+	}
+	if client.healthcheckInterval != DefaultHealthcheckInterval {
+		t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval)
+	}
+	if client.snifferEnabled != true {
+		t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled)
+	}
+	if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup {
+		t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup)
+	}
+	if client.snifferTimeout != DefaultSnifferTimeout {
+		t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout)
+	}
+	if client.snifferInterval != DefaultSnifferInterval {
+		t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval)
+	}
+	if client.basicAuth != false {
+		t.Errorf("expected no basic auth; got: %v", client.basicAuth)
+	}
+	if client.basicAuthUsername != "" {
+		t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
+	}
+	if client.basicAuthPassword != "" {
+		t.Errorf("expected no basic auth password; got: %q", client.basicAuthPassword)
+	}
+	if client.sendGetBodyAs != "GET" {
+		t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
+	}
+}
+
+func TestClientWithoutURL(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Two things should happen here:
+	// 1. The client starts sniffing the cluster on DefaultURL
+	// 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
+	if len(client.conns) == 0 {
+		t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+	if !isTravis() {
+		if _, found := findConn(DefaultURL, client.conns...); !found {
+			t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+		}
+	}
+}
+
+func TestClientWithSingleURL(t *testing.T) {
+	client, err := NewClient(SetURL("http://127.0.0.1:9200"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Two things should happen here:
+	// 1. The client starts sniffing the cluster on DefaultURL
+	// 2. The sniffing process should find (at least) one node in the cluster, i.e.
the DefaultURL + if len(client.conns) == 0 { + t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns) + } + if !isTravis() { + if _, found := findConn(DefaultURL, client.conns...); !found { + t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) + } + } +} + +func TestClientWithMultipleURLs(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes. + if len(client.conns) != 1 { + t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns) + } + if !isTravis() { + if client.conns[0].URL() != DefaultURL { + t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) + } + } +} + +func TestClientWithBasicAuth(t *testing.T) { + client, err := NewClient(SetBasicAuth("user", "secret")) + if err != nil { + t.Fatal(err) + } + if client.basicAuth != true { + t.Errorf("expected basic auth; got: %v", client.basicAuth) + } + if got, want := client.basicAuthUsername, "user"; got != want { + t.Errorf("expected basic auth username %q; got: %q", want, got) + } + if got, want := client.basicAuthPassword, "secret"; got != want { + t.Errorf("expected basic auth password %q; got: %q", want, got) + } +} + +func TestClientSniffSuccess(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:9200")) + if err != nil { + t.Fatal(err) + } + // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes. + if len(client.conns) != 1 { + t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns) + } +} + +func TestClientSniffFailure(t *testing.T) { + _, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:19201")) + if err == nil { + t.Fatalf("expected cluster to fail with no nodes found") + } +} + +func TestClientSniffDisabled(t *testing.T) { + client, err := NewClient(SetSniff(false), SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + // The client should not sniff, so it should have two connections. + if len(client.conns) != 2 { + t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns) + } + // Make two requests, so that both connections are being used + for i := 0; i < len(client.conns); i++ { + client.Flush().Do() + } + // The first connection (127.0.0.1:9200) should now be okay. + if i, found := findConn("http://127.0.0.1:9200", client.conns...); !found { + t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9200") + } else { + if conn := client.conns[i]; conn.IsDead() { + t.Fatal("expected connection to be alive, but it is dead") + } + } + // The second connection (127.0.0.1:9201) should now be marked as dead. + if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found { + t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201") + } else { + if conn := client.conns[i]; !conn.IsDead() { + t.Fatal("expected connection to be dead, but it is alive") + } + } +} + +func TestClientWillMarkConnectionsAsAliveWhenAllAreDead(t *testing.T) { + client, err := NewClient(SetURL("http://127.0.0.1:9201"), + SetSniff(false), SetHealthcheck(false), SetMaxRetries(0)) + if err != nil { + t.Fatal(err) + } + // We should have a connection. 
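+	// (Sniffing and healthchecks are disabled above, so the resurrection logic
+	// in Client.next is the only path that can revive a dead connection; that
+	// is exactly what this test exercises.)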
+	if len(client.conns) != 1 {
+		t.Fatalf("expected 1 node, got: %d (%v)", len(client.conns), client.conns)
+	}
+
+	// Make a request, so that the connection is marked as dead.
+	client.Flush().Do()
+
+	// The connection should now be marked as dead.
+	if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+		t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+	} else {
+		if conn := client.conns[i]; !conn.IsDead() {
+			t.Fatalf("expected connection to be dead, got: %v", conn)
+		}
+	}
+
+	// Now send another request and the connection should be marked as alive again.
+	client.Flush().Do()
+
+	if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+		t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+	} else {
+		if conn := client.conns[i]; conn.IsDead() {
+			t.Fatalf("expected connection to be alive, got: %v", conn)
+		}
+	}
+}
+
+func TestClientWithRequiredPlugins(t *testing.T) {
+	_, err := NewClient(SetRequiredPlugins("no-such-plugin"))
+	if err == nil {
+		t.Fatal("expected error when creating client")
+	}
+	if got, want := err.Error(), "elastic: plugin no-such-plugin not found"; got != want {
+		t.Fatalf("expected error %q; got: %q", want, got)
+	}
+}
+
+func TestClientHealthcheckStartupTimeout(t *testing.T) {
+	start := time.Now()
+	_, err := NewClient(SetURL("http://localhost:9299"), SetHealthcheckTimeoutStartup(5*time.Second))
+	duration := time.Now().Sub(start)
+	if err != ErrNoClient {
+		t.Fatal(err)
+	}
+	if duration < 5*time.Second {
+		t.Fatalf("expected a timeout in more than 5 seconds; got: %v", duration)
+	}
+}
+
+// -- NewSimpleClient --
+
+func TestSimpleClientDefaults(t *testing.T) {
+	client, err := NewSimpleClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if client.healthcheckEnabled != false {
+		t.Errorf("expected health checks to be disabled, got: %v", client.healthcheckEnabled)
+	}
+	if client.healthcheckTimeoutStartup != off {
+		t.Errorf("expected health checks timeout on startup = %v, got: %v", off, client.healthcheckTimeoutStartup)
+	}
+	if client.healthcheckTimeout != off {
+		t.Errorf("expected health checks timeout = %v, got: %v", off, client.healthcheckTimeout)
+	}
+	if client.healthcheckInterval != off {
+		t.Errorf("expected health checks interval = %v, got: %v", off, client.healthcheckInterval)
+	}
+	if client.snifferEnabled != false {
+		t.Errorf("expected sniffing to be disabled, got: %v", client.snifferEnabled)
+	}
+	if client.snifferTimeoutStartup != off {
+		t.Errorf("expected sniffer timeout on startup = %v, got: %v", off, client.snifferTimeoutStartup)
+	}
+	if client.snifferTimeout != off {
+		t.Errorf("expected sniffer timeout = %v, got: %v", off, client.snifferTimeout)
+	}
+	if client.snifferInterval != off {
+		t.Errorf("expected sniffer interval = %v, got: %v", off, client.snifferInterval)
+	}
+	if client.basicAuth != false {
+		t.Errorf("expected no basic auth; got: %v", client.basicAuth)
+	}
+	if client.basicAuthUsername != "" {
+		t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
+	}
+	if client.basicAuthPassword != "" {
+		t.Errorf("expected no basic auth password; got: %q", client.basicAuthPassword)
+	}
+	if client.sendGetBodyAs != "GET" {
+		t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
+	}
+}
+
+// -- Start and stop --
+
+func TestClientStartAndStop(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	running := client.IsRunning()
+	if !running {
+		t.Fatalf("expected background
processes to run; got: %v", running) + } + + // Stop + client.Stop() + running = client.IsRunning() + if running { + t.Fatalf("expected background processes to be stopped; got: %v", running) + } + + // Stop again => no-op + client.Stop() + running = client.IsRunning() + if running { + t.Fatalf("expected background processes to be stopped; got: %v", running) + } + + // Start + client.Start() + running = client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } + + // Start again => no-op + client.Start() + running = client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } +} + +func TestClientStartAndStopWithSnifferAndHealthchecksDisabled(t *testing.T) { + client, err := NewClient(SetSniff(false), SetHealthcheck(false)) + if err != nil { + t.Fatal(err) + } + + running := client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } + + // Stop + client.Stop() + running = client.IsRunning() + if running { + t.Fatalf("expected background processes to be stopped; got: %v", running) + } + + // Stop again => no-op + client.Stop() + running = client.IsRunning() + if running { + t.Fatalf("expected background processes to be stopped; got: %v", running) + } + + // Start + client.Start() + running = client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } + + // Start again => no-op + client.Start() + running = client.IsRunning() + if !running { + t.Fatalf("expected background processes to run; got: %v", running) + } +} + +// -- Sniffing -- + +func TestClientSniffNode(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + + ch := make(chan []*conn) + go func() { ch <- client.sniffNode(DefaultURL) }() + + select { + case nodes := <-ch: + if len(nodes) != 1 { + t.Fatalf("expected %d nodes; got: %d", 1, len(nodes)) + } + pattern := `http:\/\/[\d\.]+:9200` + matched, err := regexp.MatchString(pattern, nodes[0].URL()) + if err != nil { + t.Fatal(err) + } + if !matched { + t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL()) + } + case <-time.After(2 * time.Second): + t.Fatal("expected no timeout in sniff node") + break + } +} + +func TestClientSniffOnDefaultURL(t *testing.T) { + client, _ := NewClient() + if client == nil { + t.Fatal("no client returned") + } + + ch := make(chan error, 1) + go func() { + ch <- client.sniff(DefaultSnifferTimeoutStartup) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("expected sniff to succeed; got: %v", err) + } + if len(client.conns) != 1 { + t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns)) + } + pattern := `http:\/\/[\d\.]+:9200` + matched, err := regexp.MatchString(pattern, client.conns[0].URL()) + if err != nil { + t.Fatal(err) + } + if !matched { + t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL()) + } + case <-time.After(2 * time.Second): + t.Fatal("expected no timeout in sniff") + break + } +} + +// -- Selector -- + +func TestClientSelectConnHealthy(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // Both are healthy, so we should get both URLs in round-robin + client.conns[0].MarkAsHealthy() + client.conns[1].MarkAsHealthy() + + // #1: Return 1st + c, err := client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() 
!= client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #2: Return 2nd + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #3: Return 1st + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } +} + +func TestClientSelectConnHealthyAndDead(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // 1st is healthy, second is dead + client.conns[0].MarkAsHealthy() + client.conns[1].MarkAsDead() + + // #1: Return 1st + c, err := client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #2: Return 1st again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } + // #3: Return 1st again and again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[0].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) + } +} + +func TestClientSelectConnDeadAndHealthy(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // 1st is dead, 2nd is healthy + client.conns[0].MarkAsDead() + client.conns[1].MarkAsHealthy() + + // #1: Return 2nd + c, err := client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #2: Return 2nd again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } + // #3: Return 2nd again and again + c, err = client.next() + if err != nil { + t.Fatal(err) + } + if c.URL() != client.conns[1].URL() { + t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) + } +} + +func TestClientSelectConnAllDead(t *testing.T) { + client, err := NewClient( + SetSniff(false), + SetHealthcheck(false), + SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) + if err != nil { + t.Fatal(err) + } + + // Both are dead + client.conns[0].MarkAsDead() + client.conns[1].MarkAsDead() + + // If all connections are dead, next should make them alive again, but + // still return ErrNoClient when it first finds out. 
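+	// (This mirrors the resurrection branch in Client.next, which marks all
+	// connections alive again when sniffing is disabled.)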
+ c, err := client.next() + if err != ErrNoClient { + t.Fatal(err) + } + if c != nil { + t.Fatalf("expected no connection; got: %v", c) + } + // Return a connection + c, err = client.next() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if c == nil { + t.Fatalf("expected connection; got: %v", c) + } + // Return a connection + c, err = client.next() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if c == nil { + t.Fatalf("expected connection; got: %v", c) + } +} + +// -- ElasticsearchVersion -- + +func TestElasticsearchVersion(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + version, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if version == "" { + t.Errorf("expected a version number, got: %q", version) + } +} + +// -- IndexNames -- + +func TestIndexNames(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + names, err := client.IndexNames() + if err != nil { + t.Fatal(err) + } + if len(names) == 0 { + t.Fatalf("expected some index names, got: %d", len(names)) + } + var found bool + for _, name := range names { + if name == testIndexName { + found = true + break + } + } + if !found { + t.Fatalf("expected to find index %q; got: %v", testIndexName, found) + } +} + +// -- PerformRequest -- + +func TestPerformRequest(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } +} + +func TestPerformRequestWithSimpleClient(t *testing.T) { + client, err := NewSimpleClient() + if err != nil { + t.Fatal(err) + } + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } +} + +func TestPerformRequestWithLogger(t *testing.T) { + var w bytes.Buffer + out := log.New(&w, "LOGGER ", log.LstdFlags) + + client, err := NewClient(SetInfoLog(out)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + got := w.String() + pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n` + matched, err := regexp.MatchString(pattern, got) + if err != nil { + t.Fatalf("expected log line to match %q; got: %v", pattern, err) + } + if !matched { + t.Errorf("expected log line to match %q; got: %v", pattern, got) + } +} + +func TestPerformRequestWithLoggerAndTracer(t *testing.T) { + var lw bytes.Buffer + lout := log.New(&lw, "LOGGER ", log.LstdFlags) + + var tw bytes.Buffer + tout := log.New(&tw, "TRACER ", log.LstdFlags) 
+ + client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + lgot := lw.String() + if lgot == "" { + t.Errorf("expected logger output; got: %q", lgot) + } + + tgot := tw.String() + if tgot == "" { + t.Errorf("expected tracer output; got: %q", tgot) + } +} + +type customLogger struct { + out bytes.Buffer +} + +func (l *customLogger) Printf(format string, v ...interface{}) { + l.out.WriteString(fmt.Sprintf(format, v...) + "\n") +} + +func TestPerformRequestWithCustomLogger(t *testing.T) { + logger := &customLogger{} + + client, err := NewClient(SetInfoLog(logger)) + if err != nil { + t.Fatal(err) + } + + res, err := client.PerformRequest("GET", "/", nil, nil) + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected response to be != nil") + } + + ret := new(PingResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + t.Fatalf("expected no error on decode; got: %v", err) + } + if ret.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", ret.ClusterName) + } + + got := logger.out.String() + pattern := `^GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n` + matched, err := regexp.MatchString(pattern, got) + if err != nil { + t.Fatalf("expected log line to match %q; got: %v", pattern, err) + } + if !matched { + t.Errorf("expected log line to match %q; got: %v", pattern, got) + } +} + +// failingTransport will run a fail callback if it sees a given URL path prefix. +type failingTransport struct { + path string // path prefix to look for + fail func(*http.Request) (*http.Response, error) // call when path prefix is found + next http.RoundTripper // next round-tripper (use http.DefaultTransport if nil) +} + +// RoundTrip implements a failing transport. +func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) { + if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil { + return tr.fail(r) + } + if tr.next != nil { + return tr.next.RoundTrip(r) + } + return http.DefaultTransport.RoundTrip(r) +} + +func TestPerformRequestRetryOnHttpError(t *testing.T) { + var numFailedReqs int + fail := func(r *http.Request) (*http.Response, error) { + numFailedReqs += 1 + //return &http.Response{Request: r, StatusCode: 400}, nil + return nil, errors.New("request failed") + } + + // Run against a failing endpoint and see if PerformRequest + // retries correctly. 
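+	// (Between attempts, PerformRequest sleeps with a doubling back-off; see
+	// the retryWaitMsec handling in client.go.)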
+	tr := &failingTransport{path: "/fail", fail: fail}
+	httpClient := &http.Client{Transport: tr}
+
+	client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := client.PerformRequest("GET", "/fail", nil, nil)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if res != nil {
+		t.Fatal("expected no response")
+	}
+	// Connection should be marked as dead after it failed
+	if numFailedReqs != 5 {
+		t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
+	}
+}
+
+func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) {
+	var numFailedReqs int
+	fail := func(r *http.Request) (*http.Response, error) {
+		numFailedReqs += 1
+		return &http.Response{Request: r, StatusCode: 500}, nil
+	}
+
+	// Run against a failing endpoint and see if PerformRequest
+	// retries correctly.
+	tr := &failingTransport{path: "/fail", fail: fail}
+	httpClient := &http.Client{Transport: tr}
+
+	client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := client.PerformRequest("GET", "/fail", nil, nil)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if res != nil {
+		t.Fatal("expected no response")
+	}
+	// Retry should not have triggered additional requests, because the
+	// transport returned a valid HTTP response (status 500) rather than a
+	// connection error.
+	if numFailedReqs != 1 {
+		t.Errorf("expected %d failed requests; got: %d", 1, numFailedReqs)
+	}
+}
+
+// failingBody will return an error when json.Marshal is called on it.
+type failingBody struct{}
+
+// MarshalJSON implements the json.Marshaler interface and always returns an error.
+func (fb failingBody) MarshalJSON() ([]byte, error) {
+	return nil, errors.New("failing to marshal")
+}
+
+func TestPerformRequestWithSetBodyError(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	res, err := client.PerformRequest("GET", "/", nil, failingBody{})
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if res != nil {
+		t.Fatal("expected no response")
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/Makefile b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/Makefile
new file mode 100644
index 000000000..cc6261db5
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/Makefile
@@ -0,0 +1,16 @@
+.PHONY: build run-omega-cluster-test
+
+default: build
+
+build:
+	go build cluster-test.go
+
+run-omega-cluster-test:
+	go run -race cluster-test.go \
+		-nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
+		-n=5 \
+		-retries=5 \
+		-sniff=true -sniffer=10s \
+		-healthcheck=true -healthchecker=5s \
+		-errorlog=errors.log
+
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/README.md b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/README.md
new file mode 100644
index 000000000..f10748cc2
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/README.md
@@ -0,0 +1,63 @@
+# Cluster Test
+
+This directory contains a program you can use to test a cluster.
+
+Here's how:
+
+First, install a cluster of Elasticsearch nodes. You can install them on
+different computers, or start several nodes on a single machine.
+
+Build cluster-test by `go build cluster-test.go` (or build with `make`).
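+
+The bundled Makefile also wires a full invocation into the
+`run-omega-cluster-test` target (its node URLs point at the author's own
+test cluster, so adapt them before using it):
+
+```sh
+$ make run-omega-cluster-test
+```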
+ +Run `./cluster-test -h` to get a list of flags: + +```sh +$ ./cluster-test -h +Usage of ./cluster-test: + -errorlog="": error log file + -healthcheck=true: enable or disable healthchecks + -healthchecker=1m0s: healthcheck interval + -index="twitter": name of ES index to use + -infolog="": info log file + -n=5: number of goroutines that run searches + -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200') + -retries=0: number of retries + -sniff=true: enable or disable sniffer + -sniffer=15m0s: sniffer interval + -tracelog="": trace log file +``` + +Example: + +```sh +$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log +``` + +The above example will create an index and start some search jobs on the +cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201, +and http://127.0.0.1:9202. + +* It will create an index called `twitter` on the cluster (`-index=twitter`) +* It will run 5 search jobs in parallel (`-n=5`). +* It will retry failed requests 5 times (`-retries=5`). +* It will sniff the cluster periodically (`-sniff=true`). +* It will sniff the cluster every 10 seconds (`-sniffer=10s`). +* It will perform health checks periodically (`-healthcheck=true`). +* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`). +* It will write an error log file (`-errorlog=error.log`). + +If you want to test Elastic with nodes going up and down, you can use a +chaos monkey script like this and run it on the nodes of your cluster: + +```sh +#!/bin/bash +while true +do + echo "Starting ES node" + elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid + sleep `jot -r 1 10 300` # wait for 10-300s + echo "Stopping ES node" + kill -TERM `cat es.pid` + sleep `jot -r 1 10 60` # wait for 10-60s +done +``` diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/cluster-test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/cluster-test.go new file mode 100644 index 000000000..8880992ef --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster-test/cluster-test.go @@ -0,0 +1,356 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package main + +import ( + "encoding/json" + "errors" + "flag" + "fmt" + "log" + "math/rand" + "os" + "runtime" + "strings" + "sync/atomic" + "time" + + "gopkg.in/olivere/elastic.v3" +) + +type Tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *elastic.SuggestField `json:"suggest_field,omitempty"` +} + +var ( + nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 
'http://192.168.2.10:9200,http://192.168.2.11:9200')") + n = flag.Int("n", 5, "number of goroutines that run searches") + index = flag.String("index", "twitter", "name of ES index to use") + errorlogfile = flag.String("errorlog", "", "error log file") + infologfile = flag.String("infolog", "", "info log file") + tracelogfile = flag.String("tracelog", "", "trace log file") + retries = flag.Int("retries", elastic.DefaultMaxRetries, "number of retries") + sniff = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer") + sniffer = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval") + healthcheck = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks") + healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval") +) + +func main() { + flag.Parse() + + runtime.GOMAXPROCS(runtime.NumCPU()) + + if *nodes == "" { + log.Fatal("no nodes specified") + } + urls := strings.SplitN(*nodes, ",", -1) + + testcase, err := NewTestCase(*index, urls) + if err != nil { + log.Fatal(err) + } + + testcase.SetErrorLogFile(*errorlogfile) + testcase.SetInfoLogFile(*infologfile) + testcase.SetTraceLogFile(*tracelogfile) + testcase.SetMaxRetries(*retries) + testcase.SetHealthcheck(*healthcheck) + testcase.SetHealthcheckInterval(*healthchecker) + testcase.SetSniff(*sniff) + testcase.SetSnifferInterval(*sniffer) + + if err := testcase.Run(*n); err != nil { + log.Fatal(err) + } + + select {} +} + +type RunInfo struct { + Success bool +} + +type TestCase struct { + nodes []string + client *elastic.Client + runs int64 + failures int64 + runCh chan RunInfo + index string + errorlogfile string + infologfile string + tracelogfile string + maxRetries int + healthcheck bool + healthcheckInterval time.Duration + sniff bool + snifferInterval time.Duration +} + +func NewTestCase(index string, nodes []string) (*TestCase, error) { + if index == "" { + return nil, errors.New("no index name specified") + } + + return &TestCase{ + index: index, + nodes: nodes, + runCh: make(chan RunInfo), + }, nil +} + +func (t *TestCase) SetIndex(name string) { + t.index = name +} + +func (t *TestCase) SetErrorLogFile(name string) { + t.errorlogfile = name +} + +func (t *TestCase) SetInfoLogFile(name string) { + t.infologfile = name +} + +func (t *TestCase) SetTraceLogFile(name string) { + t.tracelogfile = name +} + +func (t *TestCase) SetMaxRetries(n int) { + t.maxRetries = n +} + +func (t *TestCase) SetSniff(enabled bool) { + t.sniff = enabled +} + +func (t *TestCase) SetSnifferInterval(d time.Duration) { + t.snifferInterval = d +} + +func (t *TestCase) SetHealthcheck(enabled bool) { + t.healthcheck = enabled +} + +func (t *TestCase) SetHealthcheckInterval(d time.Duration) { + t.healthcheckInterval = d +} + +func (t *TestCase) Run(n int) error { + if err := t.setup(); err != nil { + return err + } + + for i := 1; i < n; i++ { + go t.search() + } + + go t.monitor() + + return nil +} + +func (t *TestCase) monitor() { + print := func() { + fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ") + } + + for { + select { + case run := <-t.runCh: + atomic.AddInt64(&t.runs, 1) + if !run.Success { + atomic.AddInt64(&t.failures, 1) + fmt.Println() + } + print() + case <-time.After(5 * time.Second): + // Print stats after some inactivity + print() + break + } + } +} + +func (t *TestCase) setup() error { + var errorlogger *log.Logger + if t.errorlogfile != "" { + f, err := 
os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile) + } + + var infologger *log.Logger + if t.infologfile != "" { + f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + infologger = log.New(f, "", log.LstdFlags) + } + + // Trace request and response details like this + var tracelogger *log.Logger + if t.tracelogfile != "" { + f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return err + } + tracelogger = log.New(f, "", log.LstdFlags) + } + + client, err := elastic.NewClient( + elastic.SetURL(t.nodes...), + elastic.SetErrorLog(errorlogger), + elastic.SetInfoLog(infologger), + elastic.SetTraceLog(tracelogger), + elastic.SetMaxRetries(t.maxRetries), + elastic.SetSniff(t.sniff), + elastic.SetSnifferInterval(t.snifferInterval), + elastic.SetHealthcheck(t.healthcheck), + elastic.SetHealthcheckInterval(t.healthcheckInterval)) + if err != nil { + // Handle error + return err + } + t.client = client + + // Use the IndexExists service to check if a specified index exists. + exists, err := t.client.IndexExists(t.index).Do() + if err != nil { + return err + } + if exists { + deleteIndex, err := t.client.DeleteIndex(t.index).Do() + if err != nil { + return err + } + if !deleteIndex.Acknowledged { + return errors.New("delete index not acknowledged") + } + } + + // Create a new index. + createIndex, err := t.client.CreateIndex(t.index).Do() + if err != nil { + return err + } + if !createIndex.Acknowledged { + return errors.New("create index not acknowledged") + } + + // Index a tweet (using JSON serialization) + tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0} + _, err = t.client.Index(). + Index(t.index). + Type("tweet"). + Id("1"). + BodyJson(tweet1). + Do() + if err != nil { + return err + } + + // Index a second tweet (by string) + tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}` + _, err = t.client.Index(). + Index(t.index). + Type("tweet"). + Id("2"). + BodyString(tweet2). + Do() + if err != nil { + return err + } + + // Flush to make sure the documents got written. + _, err = t.client.Flush().Index(t.index).Do() + if err != nil { + return err + } + + return nil +} + +func (t *TestCase) search() { + // Loop forever to check for connection issues + for { + // Get tweet with specified ID + get1, err := t.client.Get(). + Index(t.index). + Type("tweet"). + Id("1"). + Do() + if err != nil { + //failf("Get failed: %v", err) + t.runCh <- RunInfo{Success: false} + continue + } + if !get1.Found { + //log.Printf("Document %s not found\n", "1") + //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type) + t.runCh <- RunInfo{Success: false} + continue + } + + // Search with a term query + searchResult, err := t.client.Search(). + Index(t.index). // search in index t.index + Query(elastic.NewTermQuery("user", "olivere")). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute + if err != nil { + //failf("Search failed: %v\n", err) + t.runCh <- RunInfo{Success: false} + continue + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. 
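+		// (Each hit's Source field is a *json.RawMessage; the unmarshal step
+		// below turns it back into a Tweet.)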
+ //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Number of hits + if searchResult.Hits != nil { + //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var tweet Tweet + err := json.Unmarshal(*hit.Source, &tweet) + if err != nil { + // Deserialization failed + //failf("Deserialize failed: %v\n", err) + t.runCh <- RunInfo{Success: false} + continue + } + + // Work with tweet + //fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + //fmt.Print("Found no tweets\n") + } + + t.runCh <- RunInfo{Success: true} + + // Sleep some time + time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_health.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_health.go new file mode 100644 index 000000000..0c51c6041 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_health.go @@ -0,0 +1,244 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ClusterHealthService allows to get a very simple status on the health of the cluster. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html +// for details. +type ClusterHealthService struct { + client *Client + pretty bool + indices []string + level string + local *bool + masterTimeout string + timeout string + waitForActiveShards *int + waitForNodes string + waitForRelocatingShards *int + waitForStatus string +} + +// NewClusterHealthService creates a new ClusterHealthService. +func NewClusterHealthService(client *Client) *ClusterHealthService { + return &ClusterHealthService{ + client: client, + indices: make([]string, 0), + } +} + +// Index limits the information returned to specific indices. +func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService { + s.indices = append(s.indices, indices...) + return s +} + +// Level specifies the level of detail for returned information. +func (s *ClusterHealthService) Level(level string) *ClusterHealthService { + s.level = level + return s +} + +// Local indicates whether to return local information. If it is true, +// we do not retrieve the state from master node (default: false). +func (s *ClusterHealthService) Local(local bool) *ClusterHealthService { + s.local = &local + return s +} + +// MasterTimeout specifies an explicit operation timeout for connection to master node. +func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout specifies an explicit operation timeout. +func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService { + s.timeout = timeout + return s +} + +// WaitForActiveShards can be used to wait until the specified number of shards are active. 
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+	s.waitForActiveShards = &waitForActiveShards
+	return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+// Example: "12" to wait for exact values, ">12" and "<12" for ranges.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+	s.waitForNodes = waitForNodes
+	return s
+}
+
+// WaitForRelocatingShards can be used to wait until the number of relocating
+// shards drops to the specified value.
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+	s.waitForRelocatingShards = &waitForRelocatingShards
+	return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+	s.waitForStatus = waitForStatus
+	return s
+}
+
+// WaitForGreenStatus will wait for the "green" state.
+func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
+	return s.WaitForStatus("green")
+}
+
+// WaitForYellowStatus will wait for the "yellow" state.
+func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
+	return s.WaitForStatus("yellow")
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+	// Build URL
+	var err error
+	var path string
+	if len(s.indices) > 0 {
+		path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+			"index": strings.Join(s.indices, ","),
+		})
+	} else {
+		path = "/_cluster/health"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.level != "" {
+		params.Set("level", s.level)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.waitForActiveShards != nil {
+		params.Set("wait_for_active_shards", fmt.Sprintf("%v", *s.waitForActiveShards))
+	}
+	if s.waitForNodes != "" {
+		params.Set("wait_for_nodes", s.waitForNodes)
+	}
+	if s.waitForRelocatingShards != nil {
+		params.Set("wait_for_relocating_shards", fmt.Sprintf("%v", *s.waitForRelocatingShards))
+	}
+	if s.waitForStatus != "" {
+		params.Set("wait_for_status", s.waitForStatus)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterHealthService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
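+//
+// A typical call chain, as exercised by the tests in this patch
+// ("twitter" is a placeholder index name):
+//
+//	res, err := client.ClusterHealth().Index("twitter").Level("shards").Pretty(true).Do()
+//	if err == nil {
+//		fmt.Println(res.Status) // "green", "yellow", or "red"
+//	}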
+func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + resp := new(ClusterHealthResponse) + if err := json.Unmarshal(res.Body, resp); err != nil { + return nil, err + } + return resp, nil +} + +// ClusterHealthResponse is the response of ClusterHealthService.Do. +type ClusterHealthResponse struct { + ClusterName string `json:"cluster_name"` + Status string `json:"status"` + TimedOut bool `json:"timed_out"` + NumberOfNodes int `json:"number_of_nodes"` + NumberOfDataNodes int `json:"number_of_data_nodes"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"` + ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` + + // Validation failures -> index name -> array of validation failures + ValidationFailures []map[string][]string `json:"validation_failures"` + + // Index name -> index health + Indices map[string]*ClusterIndexHealth `json:"indices"` +} + +// ClusterIndexHealth will be returned as part of ClusterHealthResponse. +type ClusterIndexHealth struct { + Status string `json:"status"` + NumberOfShards int `json:"number_of_shards"` + NumberOfReplicas int `json:"number_of_replicas"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + // Validation failures + ValidationFailures []string `json:"validation_failures"` + // Shards by id, e.g. "0" or "1" + Shards map[string]*ClusterShardHealth `json:"shards"` +} + +// ClusterShardHealth will be returned as part of ClusterHealthResponse. +type ClusterShardHealth struct { + Status string `json:"status"` + PrimaryActive bool `json:"primary_active"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_health_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_health_test.go new file mode 100644 index 000000000..fcb612f19 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_health_test.go @@ -0,0 +1,109 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "net/url" + "testing" +) + +func TestClusterHealth(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster health + res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.Status != "green" && res.Status != "red" && res.Status != "yellow" { + t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status) + } +} + +func TestClusterHealthURLs(t *testing.T) { + tests := []struct { + Service *ClusterHealthService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterHealthService{ + indices: []string{}, + }, + ExpectedPath: "/_cluster/health", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter"}, + }, + ExpectedPath: "/_cluster/health/twitter", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter", "gplus"}, + }, + ExpectedPath: "/_cluster/health/twitter%2Cgplus", + }, + { + Service: &ClusterHealthService{ + indices: []string{"twitter"}, + waitForStatus: "yellow", + }, + ExpectedPath: "/_cluster/health/twitter", + ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} + +func TestClusterHealthWaitForStatus(t *testing.T) { + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + // Cluster health on an index that does not exist should never get to yellow + health, err := client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do() + if err == nil { + t.Fatalf("expected timeout error; got: %v", err) + } + if !IsTimeout(err) { + t.Fatalf("expected timeout error; got: %v", err) + } + if health != nil { + t.Fatalf("expected no response; got: %v", health) + } + + // Cluster wide health + health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if health.TimedOut != false { + t.Fatalf("expected no timeout; got: %v "+ + "(does your local cluster contain unassigned shards?)", health.TimedOut) + } + if health.Status != "green" { + t.Fatalf("expected health = %q; got: %q", "green", health.Status) + } + + // Cluster wide health via shortcut on client + err = client.WaitForGreenStatus("10s") + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_state.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_state.go new file mode 100644 index 000000000..9c3678c75 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_state.go @@ -0,0 +1,284 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// ClusterStateService allows getting comprehensive state information for the
+// whole cluster.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html
+// for details.
+type ClusterStateService struct {
+	client            *Client
+	pretty            bool
+	indices           []string
+	metrics           []string
+	allowNoIndices    *bool
+	expandWildcards   string
+	flatSettings      *bool
+	ignoreUnavailable *bool
+	local             *bool
+	masterTimeout     string
+}
+
+// NewClusterStateService creates a new ClusterStateService.
+func NewClusterStateService(client *Client) *ClusterStateService {
+	return &ClusterStateService{
+		client:  client,
+		indices: make([]string, 0),
+		metrics: make([]string, 0),
+	}
+}
+
+// Index is a list of index names. Use _all or an empty string to
+// perform the operation on all indices.
+func (s *ClusterStateService) Index(indices ...string) *ClusterStateService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Metric limits the information returned to the specified metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService {
+	s.metrics = append(s.metrics, metrics...)
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes the `_all` string or when no indices have been specified).
+func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed, or both.
+func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// FlatSettings, when set, returns settings in flat format (default: false).
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// Local indicates whether to return local information. When set, it does not
+// retrieve the state from the master node (default: false).
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+	s.local = &local
+	return s
+}
+
+// MasterTimeout specifies the timeout for the connection to the master node.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
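+//
+// Missing metrics or indices default to "_all", so the resulting paths look
+// like the expectations in cluster_state_test.go below, e.g.:
+//
+//	/_cluster/state/_all/_all       (no metrics, no indices)
+//	/_cluster/state/_all/twitter    (indices: twitter)
+//	/_cluster/state/nodes/twitter   (metrics: nodes; indices: twitter)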
+func (s *ClusterStateService) buildURL() (string, url.Values, error) { + // Build URL + metrics := strings.Join(s.metrics, ",") + if metrics == "" { + metrics = "_all" + } + indices := strings.Join(s.indices, ",") + if indices == "" { + indices = "_all" + } + path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{ + "metrics": metrics, + "indices": indices, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStateService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterStateService) Do() (*ClusterStateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStateResponse is the response of ClusterStateService.Do. +type ClusterStateResponse struct { + ClusterName string `json:"cluster_name"` + Version int64 `json:"version"` + StateUUID string `json:"state_uuid"` + MasterNode string `json:"master_node"` + Blocks map[string]*clusterBlocks `json:"blocks"` + Nodes map[string]*discoveryNode `json:"nodes"` + Metadata *clusterStateMetadata `json:"metadata"` + RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"` + RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"` + Customs map[string]interface{} `json:"customs"` +} + +type clusterBlocks struct { + Global map[string]*clusterBlock `json:"global"` // id -> cluster block + Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block +} + +type clusterBlock struct { + Description string `json:"description"` + Retryable bool `json:"retryable"` + DisableStatePersistence bool `json:"disable_state_persistence"` + Levels []string `json:"levels"` +} + +type clusterStateMetadata struct { + ClusterUUID string `json:"cluster_uuid"` + Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata + Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data + RoutingTable struct { + Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table + } `json:"routing_table"` + RoutingNodes struct { + Unassigned []*shardRouting `json:"unassigned"` + Nodes []*shardRouting `json:"nodes"` + } `json:"routing_nodes"` + Customs map[string]interface{} `json:"customs"` +} + +type discoveryNode struct { + Name string `json:"name"` // server name, e.g. 
"es1" + TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300] + Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true } +} + +type clusterStateRoutingTable struct { + Indices map[string]interface{} `json:"indices"` +} + +type clusterStateRoutingNode struct { + Unassigned []*shardRouting `json:"unassigned"` + // Node Id -> shardRouting + Nodes map[string][]*shardRouting `json:"nodes"` +} + +type indexTemplateMetaData struct { + Template string `json:"template"` // e.g. "store-*" + Order int `json:"order"` + Settings map[string]interface{} `json:"settings"` // index settings + Mappings map[string]interface{} `json:"mappings"` // type name -> mapping +} + +type indexMetaData struct { + State string `json:"state"` + Settings map[string]interface{} `json:"settings"` + Mappings map[string]interface{} `json:"mappings"` + Aliases []string `json:"aliases"` // e.g. [ "alias1", "alias2" ] +} + +type indexRoutingTable struct { + Shards map[string]*shardRouting `json:"shards"` +} + +type shardRouting struct { + State string `json:"state"` + Primary bool `json:"primary"` + Node string `json:"node"` + RelocatingNode string `json:"relocating_node"` + Shard int `json:"shard"` + Index string `json:"index"` + Version int64 `json:"state"` + RestoreSource *RestoreSource `json:"restore_source"` + AllocationId *allocationId `json:"allocation_id"` + UnassignedInfo *unassignedInfo `json:"unassigned_info"` +} + +type RestoreSource struct { + Repository string `json:"repository"` + Snapshot string `json:"snapshot"` + Version string `json:"version"` + Index string `json:"index"` +} + +type allocationId struct { + Id string `json:"id"` + RelocationId string `json:"relocation_id"` +} + +type unassignedInfo struct { + Reason string `json:"reason"` + At string `json:"at"` + Details string `json:"details"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_state_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_state_test.go new file mode 100644 index 000000000..e73a8eeb7 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_state_test.go @@ -0,0 +1,92 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "net/url" + "testing" +) + +func TestClusterState(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster state + res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.ClusterName == "" { + t.Fatalf("expected a cluster name; got: %q", res.ClusterName) + } +} + +func TestClusterStateURLs(t *testing.T) { + tests := []struct { + Service *ClusterStateService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterStateService{ + indices: []string{}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/_all", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/twitter", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter", "gplus"}, + metrics: []string{}, + }, + ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus", + }, + { + Service: &ClusterStateService{ + indices: []string{}, + metrics: []string{"nodes"}, + }, + ExpectedPath: "/_cluster/state/nodes/_all", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{"nodes"}, + }, + ExpectedPath: "/_cluster/state/nodes/twitter", + }, + { + Service: &ClusterStateService{ + indices: []string{"twitter"}, + metrics: []string{"nodes"}, + masterTimeout: "1s", + }, + ExpectedPath: "/_cluster/state/nodes/twitter", + ExpectedParams: url.Values{"master_timeout": []string{"1s"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_stats.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_stats.go new file mode 100644 index 000000000..1f0430592 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_stats.go @@ -0,0 +1,349 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html. +type ClusterStatsService struct { + client *Client + pretty bool + nodeId []string + flatSettings *bool + human *bool +} + +// NewClusterStatsService creates a new ClusterStatsService. +func NewClusterStatsService(client *Client) *ClusterStatsService { + return &ClusterStatsService{ + client: client, + nodeId: make([]string, 0), + } +} + +// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. 
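+//
+// For example, NodeId([]string{"_local"}) restricts the stats to the node the
+// client is connected to, while leaving the list empty keeps the cluster-wide
+// default.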
+func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService { + s.nodeId = nodeId + return s +} + +// FlatSettings is documented as: Return settings in flat format (default: false). +func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService { + s.flatSettings = &flatSettings + return s +} + +// Human is documented as: Whether to return time and byte values in human-readable format.. +func (s *ClusterStatsService) Human(human bool) *ClusterStatsService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ClusterStatsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.nodeId) > 0 { + path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{ + "node_id": strings.Join(s.nodeId, ","), + }) + if err != nil { + return "", url.Values{}, err + } + } else { + path, err = uritemplates.Expand("/_cluster/stats", map[string]string{}) + if err != nil { + return "", url.Values{}, err + } + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStatsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStatsResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStatsResponse is the response of ClusterStatsService.Do. 
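+//
+// A minimal retrieval sketch, assuming an already-configured *Client named
+// "client" (see TestClusterStats in cluster_stats_test.go below):
+//
+//	stats, err := client.ClusterStats().Do()
+//	if err == nil {
+//		fmt.Printf("%s has %d indices\n", stats.ClusterName, stats.Indices.Count)
+//	}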
+type ClusterStatsResponse struct { + Timestamp int64 `json:"timestamp"` + ClusterName string `json:"cluster_name"` + ClusterUUID string `json:"uuid"` + Status string `json:"status"` + Indices *ClusterStatsIndices `json:"indices"` + Nodes *ClusterStatsNodes `json:"nodes"` +} + +type ClusterStatsIndices struct { + Count int `json:"count"` + Shards *ClusterStatsIndicesShards `json:"shards"` + Docs *ClusterStatsIndicesDocs `json:"docs"` + Store *ClusterStatsIndicesStore `json:"store"` + FieldData *ClusterStatsIndicesFieldData `json:"fielddata"` + FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"` + IdCache *ClusterStatsIndicesIdCache `json:"id_cache"` + Completion *ClusterStatsIndicesCompletion `json:"completion"` + Segments *ClusterStatsIndicesSegments `json:"segments"` + Percolate *ClusterStatsIndicesPercolate `json:"percolate"` +} + +type ClusterStatsIndicesShards struct { + Total int `json:"total"` + Primaries int `json:"primaries"` + Replication float64 `json:"replication"` + Index *ClusterStatsIndicesShardsIndex `json:"index"` +} + +type ClusterStatsIndicesShardsIndex struct { + Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"` + Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"` + Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"` +} + +type ClusterStatsIndicesShardsIndexIntMinMax struct { + Min int `json:"min"` + Max int `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesShardsIndexFloat64MinMax struct { + Min float64 `json:"min"` + Max float64 `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesDocs struct { + Count int `json:"count"` + Deleted int `json:"deleted"` +} + +type ClusterStatsIndicesStore struct { + Size string `json:"size"` // e.g. "5.3gb" + SizeInBytes int64 `json:"size_in_bytes"` + ThrottleTime string `json:"throttle_time"` // e.g. "0s" + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type ClusterStatsIndicesFieldData struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + Fields map[string]struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesFilterCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` +} + +type ClusterStatsIndicesIdCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` +} + +type ClusterStatsIndicesCompletion struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + Fields map[string]struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesSegments struct { + Count int64 `json:"count"` + Memory string `json:"memory"` // e.g. "61.3kb" + MemoryInBytes int64 `json:"memory_in_bytes"` + IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb" + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb" + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` + VersionMapMemory string `json:"version_map_memory"` // e.g. 
"61.3kb" + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` + FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb" + FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` +} + +type ClusterStatsIndicesPercolate struct { + Total int64 `json:"total"` + // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems + Time string `json:"get_time"` // e.g. "1s" + TimeInBytes int64 `json:"time_in_millis"` + Current int64 `json:"current"` + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"` + Queries int64 `json:"queries"` +} + +// --- + +type ClusterStatsNodes struct { + Count *ClusterStatsNodesCounts `json:"counts"` + Versions []string `json:"versions"` + OS *ClusterStatsNodesOsStats `json:"os"` + Process *ClusterStatsNodesProcessStats `json:"process"` + JVM *ClusterStatsNodesJvmStats `json:"jvm"` + FS *ClusterStatsNodesFsStats `json:"fs"` + Plugins []*ClusterStatsNodesPlugin `json:"plugins"` +} + +type ClusterStatsNodesCounts struct { + Total int `json:"total"` + MasterOnly int `json:"master_only"` + DataOnly int `json:"data_only"` + MasterData int `json:"master_data"` + Client int `json:"client"` +} + +type ClusterStatsNodesOsStats struct { + AvailableProcessors int `json:"available_processors"` + Mem *ClusterStatsNodesOsStatsMem `json:"mem"` + CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"` +} + +type ClusterStatsNodesOsStatsMem struct { + Total string `json:"total"` // e.g. "16gb" + TotalInBytes int64 `json:"total_in_bytes"` +} + +type ClusterStatsNodesOsStatsCPU struct { + Vendor string `json:"vendor"` + Model string `json:"model"` + MHz int `json:"mhz"` + TotalCores int `json:"total_cores"` + TotalSockets int `json:"total_sockets"` + CoresPerSocket int `json:"cores_per_socket"` + CacheSize string `json:"cache_size"` // e.g. "256b" + CacheSizeInBytes int64 `json:"cache_size_in_bytes"` + Count int `json:"count"` +} + +type ClusterStatsNodesProcessStats struct { + CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"` + OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"` +} + +type ClusterStatsNodesProcessStatsCPU struct { + Percent float64 `json:"percent"` +} + +type ClusterStatsNodesProcessStatsOpenFileDescriptors struct { + Min int64 `json:"min"` + Max int64 `json:"max"` + Avg int64 `json:"avg"` +} + +type ClusterStatsNodesJvmStats struct { + MaxUptime string `json:"max_uptime"` // e.g. "5h" + MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` + Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"` + Mem *ClusterStatsNodesJvmStatsMem `json:"mem"` + Threads int64 `json:"threads"` +} + +type ClusterStatsNodesJvmStatsVersion struct { + Version string `json:"version"` // e.g. "1.8.0_45" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. "25.45-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + Count int `json:"count"` +} + +type ClusterStatsNodesJvmStatsMem struct { + HeapUsed string `json:"heap_used"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` + HeapMax string `json:"heap_max"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` +} + +type ClusterStatsNodesFsStats struct { + Path string `json:"path"` + Mount string `json:"mount"` + Dev string `json:"dev"` + Total string `json:"total"` // e.g. "930.7gb"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` // e.g. 
"930.7gb"` + FreeInBytes int64 `json:"free_in_bytes"` + Available string `json:"available"` // e.g. "930.7gb"` + AvailableInBytes int64 `json:"available_in_bytes"` + DiskReads int64 `json:"disk_reads"` + DiskWrites int64 `json:"disk_writes"` + DiskIOOp int64 `json:"disk_io_op"` + DiskReadSize string `json:"disk_read_size"` // e.g. "0b"` + DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"` + DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"` + DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"` + DiskIOSize string `json:"disk_io_size"` // e.g. "0b"` + DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"` + DiskQueue string `json:"disk_queue"` + DiskServiceTime string `json:"disk_service_time"` +} + +type ClusterStatsNodesPlugin struct { + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + URL string `json:"url"` + JVM bool `json:"jvm"` + Site bool `json:"site"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_stats_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_stats_test.go new file mode 100644 index 000000000..74326a6e7 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/cluster_stats_test.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/url" + "testing" +) + +func TestClusterStats(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Get cluster stats + res, err := client.ClusterStats().Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected res to be != nil; got: %v", res) + } + if res.ClusterName == "" { + t.Fatalf("expected a cluster name; got: %q", res.ClusterName) + } +} + +func TestClusterStatsURLs(t *testing.T) { + fFlag := false + tFlag := true + + tests := []struct { + Service *ClusterStatsService + ExpectedPath string + ExpectedParams url.Values + }{ + { + Service: &ClusterStatsService{ + nodeId: []string{}, + }, + ExpectedPath: "/_cluster/stats", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1"}, + }, + ExpectedPath: "/_cluster/stats/nodes/node1", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1", "node2"}, + }, + ExpectedPath: "/_cluster/stats/nodes/node1%2Cnode2", + }, + { + Service: &ClusterStatsService{ + nodeId: []string{}, + flatSettings: &tFlag, + }, + ExpectedPath: "/_cluster/stats", + ExpectedParams: url.Values{"flat_settings": []string{"true"}}, + }, + { + Service: &ClusterStatsService{ + nodeId: []string{"node1"}, + flatSettings: &fFlag, + }, + ExpectedPath: "/_cluster/stats/nodes/node1", + ExpectedParams: url.Values{"flat_settings": []string{"false"}}, + }, + } + + for _, test := range tests { + gotPath, gotParams, err := test.Service.buildURL() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if gotPath != test.ExpectedPath { + t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) + } + if gotParams.Encode() != test.ExpectedParams.Encode() { + t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/config/elasticsearch.yml b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/config/elasticsearch.yml new file mode 100644 index 000000000..b571a064c --- /dev/null +++ 
b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/config/elasticsearch.yml
@@ -0,0 +1,103 @@
+# ======================== Elasticsearch Configuration =========================
+#
+# NOTE: Elasticsearch comes with reasonable defaults for most settings.
+#       Before you set out to tweak and tune the configuration, make sure you
+#       understand what you are trying to accomplish and the consequences.
+#
+# The primary way of configuring a node is via this file. This template lists
+# the most important settings you may want to configure for a production cluster.
+#
+# Please see the documentation for further information on configuration options:
+#
+#
+# ---------------------------------- Cluster -----------------------------------
+#
+# Use a descriptive name for your cluster:
+#
+# cluster.name: my-application
+#
+# ------------------------------------ Node ------------------------------------
+#
+# Use a descriptive name for the node:
+#
+# node.name: node-1
+#
+# Add custom attributes to the node:
+#
+# node.rack: r1
+#
+# ----------------------------------- Paths ------------------------------------
+#
+# Path to directory where to store the data (separate multiple locations by comma):
+#
+# path.data: /path/to/data
+#
+# Path to log files:
+#
+# path.logs: /path/to/logs
+#
+# ----------------------------------- Memory -----------------------------------
+#
+# Lock the memory on startup:
+#
+# bootstrap.mlockall: true
+#
+# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
+# available on the system and that the owner of the process is allowed to use this limit.
+#
+# Elasticsearch performs poorly when the system is swapping memory.
+#
+# ---------------------------------- Network -----------------------------------
+#
+# Set the bind address to a specific IP (IPv4 or IPv6):
+#
+# network.host: 192.168.0.1
+#
+# Set a custom port for HTTP:
+#
+# http.port: 9200
+#
+# For more information, see the documentation at:
+#
+#
+# ---------------------------------- Gateway -----------------------------------
+#
+# Block initial recovery after a full cluster restart until N nodes are started:
+#
+# gateway.recover_after_nodes: 3
+#
+# For more information, see the documentation at:
+#
+#
+# --------------------------------- Discovery ----------------------------------
+#
+# Elasticsearch nodes will find each other via multicast, by default.
+# +# To use the unicast discovery, disable the multicast discovery: +# +# discovery.zen.ping.multicast.enabled: false +# +# Pass an initial list of hosts to perform discovery when new node is started: +# +# discovery.zen.ping.unicast.hosts: ["host1", "host2"] +# +# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): +# +# discovery.zen.minimum_master_nodes: 3 +# +# For more information, see the documentation at: +# +# +# ---------------------------------- Various ----------------------------------- +# +# Disable starting multiple nodes on a single system: +# +# node.max_local_storage_nodes: 1 +# +# Require explicit names when deleting indices: +# +# action.destructive_requires_name: true + +# Enable scripting as described here: https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html +script.inline: on +script.indexed: on diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/connection.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/connection.go new file mode 100644 index 000000000..b8b5bf8aa --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/connection.go @@ -0,0 +1,90 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "sync" + "time" +) + +// conn represents a single connection to a node in a cluster. +type conn struct { + sync.RWMutex + nodeID string // node ID + url string + failures int + dead bool + deadSince *time.Time +} + +// newConn creates a new connection to the given URL. +func newConn(nodeID, url string) *conn { + c := &conn{ + nodeID: nodeID, + url: url, + } + return c +} + +// String returns a representation of the connection status. +func (c *conn) String() string { + c.RLock() + defer c.RUnlock() + return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince) +} + +// NodeID returns the ID of the node of this connection. +func (c *conn) NodeID() string { + c.RLock() + defer c.RUnlock() + return c.nodeID +} + +// URL returns the URL of this connection. +func (c *conn) URL() string { + c.RLock() + defer c.RUnlock() + return c.url +} + +// IsDead returns true if this connection is marked as dead, i.e. a previous +// request to the URL has been unsuccessful. +func (c *conn) IsDead() bool { + c.RLock() + defer c.RUnlock() + return c.dead +} + +// MarkAsDead marks this connection as dead, increments the failures +// counter and stores the current time in dead since. +func (c *conn) MarkAsDead() { + c.Lock() + c.dead = true + if c.deadSince == nil { + utcNow := time.Now().UTC() + c.deadSince = &utcNow + } + c.failures += 1 + c.Unlock() +} + +// MarkAsAlive marks this connection as eligible to be returned from the +// pool of connections by the selector. +func (c *conn) MarkAsAlive() { + c.Lock() + c.dead = false + c.Unlock() +} + +// MarkAsHealthy marks this connection as healthy, i.e. a request has been +// successfully performed with it. 
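+//
+// A sketch of the intended lifecycle, using a hypothetical node:
+//
+//	c := newConn("node-1", "http://127.0.0.1:9200")
+//	c.MarkAsDead()    // a request failed: dead=true, failures incremented
+//	c.MarkAsAlive()   // the selector retries the connection: dead=false
+//	c.MarkAsHealthy() // a request succeeded: failures and deadSince are reset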
+func (c *conn) MarkAsHealthy() { + c.Lock() + c.dead = false + c.deadSince = nil + c.failures = 0 + c.Unlock() +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/count.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/count.go new file mode 100644 index 000000000..ebc878b2d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/count.go @@ -0,0 +1,310 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// CountService is a convenient service for determining the +// number of documents in an index. Use SearchService with +// a SearchType of count for counting with queries etc. +type CountService struct { + client *Client + pretty bool + index []string + typ []string + allowNoIndices *bool + analyzeWildcard *bool + analyzer string + defaultOperator string + df string + expandWildcards string + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + minScore interface{} + preference string + q string + query Query + routing string + bodyJson interface{} + bodyString string +} + +// NewCountService creates a new CountService. +func NewCountService(client *Client) *CountService { + return &CountService{ + client: client, + } +} + +// Index sets the names of the indices to restrict the results. +func (s *CountService) Index(index ...string) *CountService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// Type sets the types to use to restrict the results. +func (s *CountService) Type(typ ...string) *CountService { + if s.typ == nil { + s.typ = make([]string, 0) + } + s.typ = append(s.typ, typ...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes "_all" string +// or when no indices have been specified). +func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService { + s.allowNoIndices = &allowNoIndices + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *CountService) Analyzer(analyzer string) *CountService { + s.analyzer = analyzer + return s +} + +// DefaultOperator specifies the default operator for query string query (AND or OR). +func (s *CountService) DefaultOperator(defaultOperator string) *CountService { + s.defaultOperator = defaultOperator + return s +} + +// Df specifies the field to use as default where no field prefix is given +// in the query string. +func (s *CountService) Df(df string) *CountService { + s.df = df + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *CountService) ExpandWildcards(expandWildcards string) *CountService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). 
+func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Lenient specifies whether format-based query failures (such as +// providing text to a numeric field) should be ignored. +func (s *CountService) Lenient(lenient bool) *CountService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// MinScore indicates to include only documents with a specific `_score` +// value in the result. +func (s *CountService) MinScore(minScore interface{}) *CountService { + s.minScore = minScore + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *CountService) Preference(preference string) *CountService { + s.preference = preference + return s +} + +// Q in the Lucene query string syntax. You can also use Query to pass +// a Query struct. +func (s *CountService) Q(q string) *CountService { + s.q = q + return s +} + +// Query specifies the query to pass. You can also pass a query string with Q. +func (s *CountService) Query(query Query) *CountService { + s.query = query + return s +} + +// Routing specifies the routing value. +func (s *CountService) Routing(routing string) *CountService { + s.routing = routing + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *CountService) Pretty(pretty bool) *CountService { + s.pretty = pretty + return s +} + +// BodyJson specifies the query to restrict the results specified with the +// Query DSL (optional). The interface{} will be serialized to a JSON document, +// so use a map[string]interface{}. +func (s *CountService) BodyJson(body interface{}) *CountService { + s.bodyJson = body + return s +} + +// Body specifies a query to restrict the results specified with +// the Query DSL (optional). +func (s *CountService) BodyString(body string) *CountService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
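+//
+// The resulting paths match the expectations in count_test.go below, e.g.:
+//
+//	/_all/_count                                           (no indices, no types)
+//	/_all/tweet/_count                                     (types: tweet)
+//	/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count   (two indices, two types)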
+func (s *CountService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_all/_count" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.minScore != nil { + params.Set("min_score", fmt.Sprintf("%v", s.minScore)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *CountService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *CountService) Do() (int64, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return 0, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return 0, err + } + + // Setup HTTP request body + var body interface{} + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return 0, err + } + query := make(map[string]interface{}) + query["query"] = src + body = query + } else if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return 0, err + } + + // Return result + ret := new(CountResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return 0, err + } + if ret != nil { + return ret.Count, nil + } + + return int64(0), nil +} + +// CountResponse is the response of using the Count API. +type CountResponse struct { + Count int64 `json:"count"` + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/count_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/count_test.go new file mode 100644 index 000000000..bfc2a2955 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/count_test.go @@ -0,0 +1,124 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestCountURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Expected string + }{ + { + []string{}, + []string{}, + "/_all/_count", + }, + { + []string{}, + []string{"tweet"}, + "/_all/tweet/_count", + }, + { + []string{"twitter-*"}, + []string{"tweet", "follower"}, + "/twitter-%2A/tweet%2Cfollower/_count", + }, + { + []string{"twitter-2014", "twitter-2015"}, + []string{"tweet", "follower"}, + "/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count", + }, + } + + for _, test := range tests { + path, _, err := client.Count().Index(test.Indices...).Type(test.Types...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestCount(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Count documents + count, err = client.Count(testIndexName).Type("tweet").Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Count documents + count, err = client.Count(testIndexName).Type("gezwitscher").Do() + if err != nil { + t.Fatal(err) + } + if count != 0 { + t.Errorf("expected Count = %d; got %d", 0, count) + } + + // Count with query + query := NewTermQuery("user", "olivere") + count, err = client.Count(testIndexName).Query(query).Do() + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } + + // Count with query and type + query = NewTermQuery("user", "olivere") + count, err = client.Count(testIndexName).Type("tweet").Query(query).Do() + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/decoder.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/decoder.go new file mode 100644 index 000000000..765a5be30 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/decoder.go @@ -0,0 +1,26 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// Decoder is used to decode responses from Elasticsearch. +// Users of elastic can implement their own marshaler for advanced purposes +// and set them per Client (see SetDecoder). 
If none is specified, +// DefaultDecoder is used. +type Decoder interface { + Decode(data []byte, v interface{}) error +} + +// DefaultDecoder uses json.Unmarshal from the Go standard library +// to decode JSON data. +type DefaultDecoder struct{} + +// Decode decodes with json.Unmarshal from the Go standard library. +func (u *DefaultDecoder) Decode(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/decoder_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/decoder_test.go new file mode 100644 index 000000000..5cfce9f5d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/decoder_test.go @@ -0,0 +1,49 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" + "sync/atomic" + "testing" +) + +type decoder struct { + dec json.Decoder + + N int64 +} + +func (d *decoder) Decode(data []byte, v interface{}) error { + atomic.AddInt64(&d.N, 1) + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + return dec.Decode(v) +} + +func TestDecoder(t *testing.T) { + dec := &decoder{} + client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0)) + + tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + if dec.N <= 0 { + t.Errorf("expected at least 1 call of decoder; got: %d", dec.N) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete.go new file mode 100644 index 000000000..dca135ee1 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete.go @@ -0,0 +1,214 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteService allows to delete a typed JSON document from a specified +// index based on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html +// for details. +type DeleteService struct { + client *Client + pretty bool + id string + index string + typ string + routing string + timeout string + version interface{} + versionType string + consistency string + parent string + refresh *bool + replication string +} + +// NewDeleteService creates a new DeleteService. +func NewDeleteService(client *Client) *DeleteService { + return &DeleteService{ + client: client, + } +} + +// Type is the type of the document. +func (s *DeleteService) Type(typ string) *DeleteService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *DeleteService) Id(id string) *DeleteService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *DeleteService) Index(index string) *DeleteService { + s.index = index + return s +} + +// Replication specifies a replication type. 
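+// Presumably "sync" or "async", mirroring the Replication option on
+// DeleteByQueryService further below.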
+func (s *DeleteService) Replication(replication string) *DeleteService { + s.replication = replication + return s +} + +// Routing is a specific routing value. +func (s *DeleteService) Routing(routing string) *DeleteService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *DeleteService) Timeout(timeout string) *DeleteService { + s.timeout = timeout + return s +} + +// Version is an explicit version number for concurrency control. +func (s *DeleteService) Version(version interface{}) *DeleteService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *DeleteService) VersionType(versionType string) *DeleteService { + s.versionType = versionType + return s +} + +// Consistency defines a specific write consistency setting for the operation. +func (s *DeleteService) Consistency(consistency string) *DeleteService { + s.consistency = consistency + return s +} + +// Parent is the ID of parent document. +func (s *DeleteService) Parent(parent string) *DeleteService { + s.parent = parent + return s +} + +// Refresh the index after performing the operation. +func (s *DeleteService) Refresh(refresh bool) *DeleteService { + s.refresh = &refresh + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *DeleteService) Pretty(pretty bool) *DeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "index": s.index, + "type": s.typ, + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.replication != "" { + params.Set("replication", s.replication) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteService) Validate() error { + var invalid []string + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *DeleteService) Do() (*DeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete request. + +// DeleteResponse is the outcome of running DeleteService.Do. 
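+//
+// A minimal usage sketch, assuming an already-configured *Client named
+// "client":
+//
+//	res, err := client.Delete().Index("twitter").Type("tweet").Id("1").Do()
+//	if err == nil && res.Found {
+//		// the document existed and has been removed
+//	}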
+type DeleteResponse struct { + // TODO _shards { total, failed, successful } + Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int64 `json:"_version"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_by_query.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_by_query.go new file mode 100644 index 000000000..3db9c0ce8 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_by_query.go @@ -0,0 +1,302 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteByQueryService deletes documents that match a query. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html. +type DeleteByQueryService struct { + client *Client + indices []string + types []string + analyzer string + consistency string + defaultOper string + df string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + replication string + routing string + timeout string + pretty bool + q string + query Query +} + +// NewDeleteByQueryService creates a new DeleteByQueryService. +// You typically use the client's DeleteByQuery to get a reference to +// the service. +func NewDeleteByQueryService(client *Client) *DeleteByQueryService { + builder := &DeleteByQueryService{ + client: client, + } + return builder +} + +// Index sets the indices on which to perform the delete operation. +func (s *DeleteByQueryService) Index(indices ...string) *DeleteByQueryService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +// Type limits the delete operation to the given types. +func (s *DeleteByQueryService) Type(types ...string) *DeleteByQueryService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Analyzer to use for the query string. +func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService { + s.analyzer = analyzer + return s +} + +// Consistency represents the specific write consistency setting for the operation. +// It can be one, quorum, or all. +func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService { + s.consistency = consistency + return s +} + +// DefaultOperator for query string query (AND or OR). +func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService { + s.defaultOper = defaultOperator + return s +} + +// DF is the field to use as default where no field prefix is given in the query string. +func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// DefaultField is the field to use as default where no field prefix is given in the query string. +// It is an alias to the DF func. +func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). 
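+//
+// For example, a delete-by-query across several indices (names invented)
+// that tolerates a missing index:
+//
+//	res, err := client.DeleteByQuery().
+//		Index("logs-2015", "logs-2016").
+//		Query(NewTermQuery("level", "debug")).
+//		IgnoreUnavailable(true).
+//		Do()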
+func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService { + s.ignoreUnavailable = &ignore + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices (including the _all string +// or when no indices have been specified). +func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { + s.allowNoIndices = &allow + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. It can be "open" or "closed". +func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { + s.expandWildcards = expand + return s +} + +// Replication sets a specific replication type (sync or async). +func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService { + s.replication = replication + return s +} + +// Q specifies the query in Lucene query string syntax. You can also use +// Query to programmatically specify the query. +func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService { + s.q = query + return s +} + +// QueryString is an alias to Q. Notice that you can also use Query to +// programmatically set the query. +func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService { + s.q = query + return s +} + +// Routing sets a specific routing value. +func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService { + s.routing = routing + return s +} + +// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms". +func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService { + s.timeout = timeout + return s +} + +// Pretty indents the JSON output from Elasticsearch. +func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { + s.pretty = pretty + return s +} + +// Query sets the query programmatically. +func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { + s.query = query + return s +} + +// Do executes the delete-by-query operation. 
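+//
+// A typical call chain (index name and query are illustrative; error
+// handling elided):
+//
+//	q := NewTermQuery("user", "sandrae")
+//	res, err := client.DeleteByQuery().Index("twitter").Query(q).Do()
+//	if err == nil {
+//		fmt.Printf("deleted %d documents\n", res.All().Deleted)
+//	}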
+func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) {
+	var err error
+
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	// Types part
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err = uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return nil, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		path += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_query"
+
+	// Parameters
+	params := make(url.Values)
+	if s.analyzer != "" {
+		params.Set("analyzer", s.analyzer)
+	}
+	if s.consistency != "" {
+		params.Set("consistency", s.consistency)
+	}
+	if s.defaultOper != "" {
+		params.Set("default_operator", s.defaultOper)
+	}
+	if s.df != "" {
+		params.Set("df", s.df)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.replication != "" {
+		params.Set("replication", s.replication)
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.q != "" {
+		params.Set("q", s.q)
+	}
+
+	// Set body if there is a query set
+	var body interface{}
+	if s.query != nil {
+		src, err := s.query.Source()
+		if err != nil {
+			return nil, err
+		}
+		query := make(map[string]interface{})
+		query["query"] = src
+		body = query
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("DELETE", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(DeleteByQueryResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService.
+type DeleteByQueryResult struct {
+	Took     int64                               `json:"took"`
+	TimedOut bool                                `json:"timed_out"`
+	Indices  map[string]IndexDeleteByQueryResult `json:"_indices"`
+	Failures []shardOperationFailure             `json:"failures"`
+}
+
+// IndexNames returns the names of the indices the DeleteByQuery touched.
+func (res DeleteByQueryResult) IndexNames() []string {
+	var indices []string
+	for index := range res.Indices {
+		indices = append(indices, index)
+	}
+	return indices
+}
+
+// All returns the index delete-by-query result of all indices.
+func (res DeleteByQueryResult) All() IndexDeleteByQueryResult {
+	return res.Indices["_all"]
+}
+
+// IndexDeleteByQueryResult is the result of a delete-by-query for a specific
+// index.
+type IndexDeleteByQueryResult struct {
+	// Found is the number of documents matching the query.
+	Found int `json:"found"`
+	// Deleted is the number of documents successfully deleted from the index.
+	Deleted int `json:"deleted"`
+	// Missing is the number of documents that were missing when the delete
+	// was attempted.
+	Missing int `json:"missing"`
+	// Failed is the number of documents that could not be deleted.
+ Failed int `json:"failed"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_by_query_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_by_query_test.go new file mode 100644 index 000000000..71b786f6e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_by_query_test.go @@ -0,0 +1,114 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestDeleteByQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + found, err := client.HasPlugin("delete-by-query") + if err != nil { + t.Fatal(err) + } + if !found { + t.Skip("DeleteByQuery in 2.0 is now a plugin (delete-by-query) and must be " + + "loaded in the configuration") + } + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Fatalf("expected count = %d; got: %d", 3, count) + } + + // Delete all documents by sandrae + q := NewTermQuery("user", "sandrae") + res, err := client.DeleteByQuery().Index(testIndexName).Type("tweet").Query(q).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected response != nil; got: %v", res) + } + + // Check response + if got, want := len(res.IndexNames()), 2; got != want { + t.Fatalf("expected %d indices; got: %d", want, got) + } + idx, found := res.Indices["_all"] + if !found { + t.Fatalf("expected to find index %q", "_all") + } + if got, want := idx.Found, 1; got != want { + t.Fatalf("expected Found = %v; got: %v", want, got) + } + if got, want := idx.Deleted, 1; got != want { + t.Fatalf("expected Deleted = %v; got: %v", want, got) + } + if got, want := idx.Missing, 0; got != want { + t.Fatalf("expected Missing = %v; got: %v", want, got) + } + if got, want := idx.Failed, 0; got != want { + t.Fatalf("expected Failed = %v; got: %v", want, got) + } + idx, found = res.Indices[testIndexName] + if !found { + t.Errorf("expected Found = true; got: %v", found) + } + if got, want := idx.Found, 1; got != want { + t.Fatalf("expected Found = %v; got: %v", want, got) + } + if got, want := idx.Deleted, 1; got != want { + t.Fatalf("expected Deleted = %v; got: %v", want, got) + } + if got, want := idx.Missing, 0; got != want { + t.Fatalf("expected Missing = %v; got: %v", want, got) + } + if got, want := idx.Failed, 0; got != want { + t.Fatalf("expected Failed = %v; got: %v", want, got) + } + + // Flush and check count + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err = client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if 
count != 2 { + t.Fatalf("expected Count = %d; got: %d", 2, count) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_template.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_template.go new file mode 100644 index 000000000..b8d0223f6 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_template.go @@ -0,0 +1,118 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// DeleteTemplateService deletes a search template. More information can +// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type DeleteTemplateService struct { + client *Client + pretty bool + id string + version *int + versionType string +} + +// NewDeleteTemplateService creates a new DeleteTemplateService. +func NewDeleteTemplateService(client *Client) *DeleteTemplateService { + return &DeleteTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService { + s.id = id + return s +} + +// Version an explicit version number for concurrency control. +func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService { + s.version = &version + return s +} + +// VersionType specifies a version type. +func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// DeleteTemplateResponse is the response of DeleteTemplateService.Do. 
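+//
+// A minimal sketch (the template id is hypothetical; error handling elided):
+//
+//	res, err := client.DeleteTemplate().Id("my-search-template").Do()
+//	if err == nil && res.Found {
+//		// template removed
+//	}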
+type DeleteTemplateResponse struct { + Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_template_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_template_test.go new file mode 100644 index 000000000..85bb7ad55 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_template_test.go @@ -0,0 +1,22 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestDeleteTemplateValidate(t *testing.T) { + client := setupTestClient(t) + + // No template id -> fail with error + res, err := NewDeleteTemplateService(client).Do() + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_test.go new file mode 100644 index 000000000..418fdec7d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/delete_test.go @@ -0,0 +1,118 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestDelete(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Delete document 1 + res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + count, err = client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } + + // Delete non existent document 99 + res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh(true).Do() + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + if !IsNotFound(err) { + t.Errorf("expected NotFound error; got %v", err) + } + if res != nil { + t.Fatalf("expected no response; got: %v", res) + } + + count, err = client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count 
!= 2 { + t.Errorf("expected Count = %d; got %d", 2, count) + } +} + +func TestDeleteValidate(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + // No index name -> fail with error + res, err := NewDeleteService(client).Type("tweet").Id("1").Do() + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } + + // No type -> fail with error + res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do() + if err == nil { + t.Fatalf("expected Delete to fail without type") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } + + // No id -> fail with error + res, err = NewDeleteService(client).Index(testIndexName).Type("tweet").Do() + if err == nil { + t.Fatalf("expected Delete to fail without id") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/doc.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/doc.go new file mode 100644 index 000000000..336a734de --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/doc.go @@ -0,0 +1,51 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +/* +Package elastic provides an interface to the Elasticsearch server +(http://www.elasticsearch.org/). + +The first thing you do is to create a Client. If you have Elasticsearch +installed and running with its default settings +(i.e. available at http://127.0.0.1:9200), all you need to do is: + + client, err := elastic.NewClient() + if err != nil { + // Handle error + } + +If your Elasticsearch server is running on a different IP and/or port, +just provide a URL to NewClient: + + // Create a client and connect to http://192.168.2.10:9201 + client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201")) + if err != nil { + // Handle error + } + +You can pass many more configuration parameters to NewClient. Review the +documentation of NewClient for more information. + +If no Elasticsearch server is available, services will fail when creating +a new request and will return ErrNoClient. + +A Client provides services. The services usually come with a variety of +methods to prepare the query and a Do function to execute it against the +Elasticsearch REST interface and return a response. Here is an example +of the IndexExists service that checks if a given index already exists. + + exists, err := client.IndexExists("twitter").Do() + if err != nil { + // Handle error + } + if !exists { + // Index does not exist yet. + } + +Look up the documentation for Client to get an idea of the services provided +and what kinds of responses you get when executing the Do function of a service. +Also see the wiki on Github for more details. + +*/ +package elastic diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/errors.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/errors.go new file mode 100644 index 000000000..93c2c6de5 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/errors.go @@ -0,0 +1,141 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+// checkResponse will return an error if the request/response indicates
+// an error returned from Elasticsearch.
+//
+// HTTP status codes in the range [200..299] are considered successful.
+// All other status codes are considered errors unless they are listed in
+// ignoreErrors. This is necessary because for some services, HTTP status 404
+// is a valid response from Elasticsearch (e.g. the Exists service).
+//
+// The func tries to parse error details as returned from Elasticsearch
+// and encapsulates them in type elastic.Error.
+func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error {
+	// 200-299 are valid status codes
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	// Ignore certain errors?
+	for _, code := range ignoreErrors {
+		if code == res.StatusCode {
+			return nil
+		}
+	}
+	return createResponseError(res)
+}
+
+// createResponseError creates an Error structure from the HTTP response,
+// its status code and the error information sent by Elasticsearch.
+func createResponseError(res *http.Response) error {
+	if res.Body == nil {
+		return &Error{Status: res.StatusCode}
+	}
+	data, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return &Error{Status: res.StatusCode}
+	}
+	errReply := new(Error)
+	err = json.Unmarshal(data, errReply)
+	if err != nil {
+		return &Error{Status: res.StatusCode}
+	}
+	if errReply != nil {
+		if errReply.Status == 0 {
+			errReply.Status = res.StatusCode
+		}
+		return errReply
+	}
+	return &Error{Status: res.StatusCode}
+}
+
+// Error encapsulates error details as returned from Elasticsearch.
+type Error struct {
+	Status  int           `json:"status"`
+	Details *ErrorDetails `json:"error,omitempty"`
+}
+
+// ErrorDetails encapsulates error details from Elasticsearch.
+// It is used in e.g. elastic.Error and elastic.BulkResponseItem.
+type ErrorDetails struct {
+	Type         string                   `json:"type"`
+	Reason       string                   `json:"reason"`
+	ResourceType string                   `json:"resource.type,omitempty"`
+	ResourceId   string                   `json:"resource.id,omitempty"`
+	Index        string                   `json:"index,omitempty"`
+	Phase        string                   `json:"phase,omitempty"`
+	Grouped      bool                     `json:"grouped,omitempty"`
+	CausedBy     map[string]interface{}   `json:"caused_by,omitempty"`
+	RootCause    []*ErrorDetails          `json:"root_cause,omitempty"`
+	FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
+}
+
+// Error returns a string representation of the error.
+func (e *Error) Error() string {
+	if e.Details != nil && e.Details.Reason != "" {
+		return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type)
+	} else {
+		return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
+	}
+}
+
+// IsNotFound returns true if the given error indicates that Elasticsearch
+// returned HTTP status 404. The err parameter can be of type *elastic.Error,
+// elastic.Error, *http.Response or int (indicating the HTTP status code).
+func IsNotFound(err interface{}) bool {
+	switch e := err.(type) {
+	case *http.Response:
+		return e.StatusCode == http.StatusNotFound
+	case *Error:
+		return e.Status == http.StatusNotFound
+	case Error:
+		return e.Status == http.StatusNotFound
+	case int:
+		return e == http.StatusNotFound
+	}
+	return false
+}
+
+// IsTimeout returns true if the given error indicates that Elasticsearch
+// returned HTTP status 408.
The err parameter can be of type *elastic.Error, +// elastic.Error, *http.Response or int (indicating the HTTP status code). +func IsTimeout(err interface{}) bool { + switch e := err.(type) { + case *http.Response: + return e.StatusCode == http.StatusRequestTimeout + case *Error: + return e.Status == http.StatusRequestTimeout + case Error: + return e.Status == http.StatusRequestTimeout + case int: + return e == http.StatusRequestTimeout + } + return false +} + +// -- General errors -- + +// shardsInfo represents information from a shard. +type shardsInfo struct { + Total int `json:"total"` + Successful int `json:"successful"` + Failed int `json:"failed"` +} + +// shardOperationFailure represents a shard failure. +type shardOperationFailure struct { + Shard int `json:"shard"` + Index string `json:"index"` + Status string `json:"status"` + // "reason" +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/errors_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/errors_test.go new file mode 100644 index 000000000..c33dc2d6d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/errors_test.go @@ -0,0 +1,202 @@ +package elastic + +import ( + "bufio" + "fmt" + "net/http" + "strings" + "testing" +) + +func TestResponseError(t *testing.T) { + raw := "HTTP/1.1 404 Not Found\r\n" + + "\r\n" + + `{"error":{"root_cause":[{"type":"index_missing_exception","reason":"no such index","index":"elastic-test"}],"type":"index_missing_exception","reason":"no such index","index":"elastic-test"},"status":404}` + "\r\n" + r := bufio.NewReader(strings.NewReader(raw)) + + req, err := http.NewRequest("GET", "/", nil) + if err != nil { + t.Fatal(err) + } + + resp, err := http.ReadResponse(r, nil) + if err != nil { + t.Fatal(err) + } + err = checkResponse(req, resp) + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + + // Check for correct error message + expected := fmt.Sprintf("elastic: Error %d (%s): no such index [type=index_missing_exception]", resp.StatusCode, http.StatusText(resp.StatusCode)) + got := err.Error() + if got != expected { + t.Fatalf("expected %q; got: %q", expected, got) + } + + // Check that error is of type *elastic.Error, which contains additional information + e, ok := err.(*Error) + if !ok { + t.Fatal("expected error to be of type *elastic.Error") + } + if e.Status != resp.StatusCode { + t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status) + } + if e.Details == nil { + t.Fatalf("expected error details; got: %v", e.Details) + } + if got, want := e.Details.Index, "elastic-test"; got != want { + t.Fatalf("expected error details index %q; got: %q", want, got) + } + if got, want := e.Details.Type, "index_missing_exception"; got != want { + t.Fatalf("expected error details type %q; got: %q", want, got) + } + if got, want := e.Details.Reason, "no such index"; got != want { + t.Fatalf("expected error details reason %q; got: %q", want, got) + } + if got, want := len(e.Details.RootCause), 1; got != want { + t.Fatalf("expected %d error details root causes; got: %d", want, got) + } + + if got, want := e.Details.RootCause[0].Index, "elastic-test"; got != want { + t.Fatalf("expected root cause index %q; got: %q", want, got) + } + if got, want := e.Details.RootCause[0].Type, "index_missing_exception"; got != want { + t.Fatalf("expected root cause type %q; got: %q", want, got) + } + if got, want := e.Details.RootCause[0].Reason, "no such index"; got != want { + t.Fatalf("expected root cause reason %q; got: %q", 
want, got)
+	}
+}
+
+func TestResponseErrorHTML(t *testing.T) {
+	raw := "HTTP/1.1 413 Request Entity Too Large\r\n" +
+		"\r\n" +
+		`<html>
+<head><title>413 Request Entity Too Large</title></head>
+<body bgcolor="white">
+<center><h1>413 Request Entity Too Large</h1></center>
+<hr>
+<center>nginx/1.6.2</center>
+</body>
+</html>` + "\r\n"
+	r := bufio.NewReader(strings.NewReader(raw))
+
+	req, err := http.NewRequest("GET", "/", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := http.ReadResponse(r, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = checkResponse(req, resp)
+	if err == nil {
+		t.Fatalf("expected error; got: %v", err)
+	}
+
+	// Check for correct error message
+	expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge))
+	got := err.Error()
+	if got != expected {
+		t.Fatalf("expected %q; got: %q", expected, got)
+	}
+}
+
+func TestResponseErrorWithIgnore(t *testing.T) {
+	raw := "HTTP/1.1 404 Not Found\r\n" +
+		"\r\n" +
+		`{"some":"response"}` + "\r\n"
+	r := bufio.NewReader(strings.NewReader(raw))
+
+	req, err := http.NewRequest("HEAD", "/", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := http.ReadResponse(r, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = checkResponse(req, resp)
+	if err == nil {
+		t.Fatalf("expected error; got: %v", err)
+	}
+	err = checkResponse(req, resp, 404) // ignore 404 errors
+	if err != nil {
+		t.Fatalf("expected no error; got: %v", err)
+	}
+}
+
+func TestIsNotFound(t *testing.T) {
+	if got, want := IsNotFound(nil), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(""), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(200), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(404), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsNotFound(&Error{Status: 404}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(&Error{Status: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsNotFound(Error{Status: 404}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(Error{Status: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsNotFound(&http.Response{StatusCode: 404}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsNotFound(&http.Response{StatusCode: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+}
+
+func TestIsTimeout(t *testing.T) {
+	if got, want := IsTimeout(nil), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(""), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(200), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(408), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsTimeout(&Error{Status: 408}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(&Error{Status: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsTimeout(Error{Status: 408}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(Error{Status: 200}), false; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+
+	if got, want := IsTimeout(&http.Response{StatusCode: 408}), true; got != want {
+		t.Errorf("expected %v; got: %v", want, got)
+	}
+	if got, want := IsTimeout(&http.Response{StatusCode: 200}), false; got !=
want { + t.Errorf("expected %v; got: %v", want, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/example_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/example_test.go new file mode 100644 index 000000000..8fc03ec1a --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/example_test.go @@ -0,0 +1,547 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic_test + +import ( + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "time" + + "gopkg.in/olivere/elastic.v3" +) + +type Tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *elastic.SuggestField `json:"suggest_field,omitempty"` +} + +func Example() { + errorlog := log.New(os.Stdout, "APP ", log.LstdFlags) + + // Obtain a client. You can provide your own HTTP client here. + client, err := elastic.NewClient(elastic.SetErrorLog(errorlog)) + if err != nil { + // Handle error + panic(err) + } + + // Trace request and response details like this + //client.SetTracer(log.New(os.Stdout, "", 0)) + + // Ping the Elasticsearch server to get e.g. the version number + info, code, err := client.Ping("http://127.0.0.1:9200").Do() + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number) + + // Getting the ES version number is quite common, so there's a shortcut + esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200") + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Elasticsearch version %s", esversion) + + // Use the IndexExists service to check if a specified index exists. + exists, err := client.IndexExists("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !exists { + // Create a new index. + createIndex, err := client.CreateIndex("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !createIndex.Acknowledged { + // Not acknowledged + } + } + + // Index a tweet (using JSON serialization) + tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0} + put1, err := client.Index(). + Index("twitter"). + Type("tweet"). + Id("1"). + BodyJson(tweet1). + Do() + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type) + + // Index a second tweet (by string) + tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}` + put2, err := client.Index(). + Index("twitter"). + Type("tweet"). + Id("2"). + BodyString(tweet2). + Do() + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type) + + // Get tweet with specified ID + get1, err := client.Get(). + Index("twitter"). + Type("tweet"). + Id("1"). + Do() + if err != nil { + // Handle error + panic(err) + } + if get1.Found { + fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type) + } + + // Flush to make sure the documents got written. 
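+	// (Hedged aside: a refresh would also make the documents searchable
+	// without forcing a full Lucene commit; this sketch assumes the
+	// Client.Refresh service exposed by this library:
+	//
+	//	_, err = client.Refresh("twitter").Do()
+	// )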
+ _, err = client.Flush().Index("twitter").Do() + if err != nil { + panic(err) + } + + // Search with a term query + termQuery := elastic.NewTermQuery("user", "olivere") + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute + if err != nil { + // Handle error + panic(err) + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. + fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Each is a convenience function that iterates over hits in a search result. + // It makes sure you don't need to check for nil values in the response. + // However, it ignores errors in serialization. If you want full control + // over iterating the hits, see below. + var ttyp Tweet + for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { + t := item.(Tweet) + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + // TotalHits is another convenience function that works even when something goes wrong. + fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) + + // Here's how you iterate through results with full control over each step. + if searchResult.Hits != nil { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + fmt.Print("Found no tweets\n") + } + + // Update a tweet by the update API of Elasticsearch. + // We just increment the number of retweets. + script := elastic.NewScript("ctx._source.retweets += num").Param("num", 1) + update, err := client.Update().Index("twitter").Type("tweet").Id("1"). + Script(script). + Upsert(map[string]interface{}{"retweets": 0}). + Do() + if err != nil { + // Handle error + panic(err) + } + fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version) + + // ... + + // Delete an index. + deleteIndex, err := client.DeleteIndex("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !deleteIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleClient_NewClient_default() { + // Obtain a client to the Elasticsearch instance on http://127.0.0.1:9200. + client, err := elastic.NewClient() + if err != nil { + // Handle error + fmt.Printf("connection failed: %v\n", err) + } else { + fmt.Println("connected") + } + _ = client + // Output: + // connected +} + +func ExampleClient_NewClient_cluster() { + // Obtain a client for an Elasticsearch cluster of two nodes, + // running on 10.0.1.1 and 10.0.1.2. + client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200")) + if err != nil { + // Handle error + panic(err) + } + _ = client +} + +func ExampleClient_NewClient_manyOptions() { + // Obtain a client for an Elasticsearch cluster of two nodes, + // running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer. + // Set the healthcheck interval to 10s. When requests fail, + // retry 5 times. 
Print error messages to os.Stderr and informational + // messages to os.Stdout. + client, err := elastic.NewClient( + elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"), + elastic.SetSniff(false), + elastic.SetHealthcheckInterval(10*time.Second), + elastic.SetMaxRetries(5), + elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)), + elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags))) + if err != nil { + // Handle error + panic(err) + } + _ = client +} + +func ExampleIndexExistsService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Use the IndexExists service to check if the index "twitter" exists. + exists, err := client.IndexExists("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if exists { + // ... + } +} + +func ExampleCreateIndexService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Create a new index. + createIndex, err := client.CreateIndex("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !createIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleDeleteIndexService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + // Delete an index. + deleteIndex, err := client.DeleteIndex("twitter").Do() + if err != nil { + // Handle error + panic(err) + } + if !deleteIndex.Acknowledged { + // Not acknowledged + } +} + +func ExampleSearchService() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + + // Search with a term query + termQuery := elastic.NewTermQuery("user", "olivere") + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do() // execute + if err != nil { + // Handle error + panic(err) + } + + // searchResult is of type SearchResult and returns hits, suggestions, + // and all kinds of other information from Elasticsearch. + fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + + // Number of hits + if searchResult.Hits != nil { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). + var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } + } else { + // No hits + fmt.Print("Found no tweets\n") + } +} + +func ExampleAggregations() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + + // Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year). 
+	timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+	histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
+	timeline = timeline.SubAggregation("history", histogram)
+
+	// Search with a term query
+	searchResult, err := client.Search().
+		Index("twitter").                  // search in index "twitter"
+		Query(elastic.NewMatchAllQuery()). // return all results, but ...
+		SearchType("count").               // ... do not return hits, just the count
+		Aggregation("timeline", timeline). // add our aggregation to the query
+		Pretty(true).                      // pretty print request and response JSON
+		Do()                               // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Access "timeline" aggregate in search result.
+	agg, found := searchResult.Aggregations.Terms("timeline")
+	if !found {
+		log.Fatalf("we should have a terms aggregation called %q", "timeline")
+	}
+	for _, userBucket := range agg.Buckets {
+		// Every bucket should have the user field as key.
+		user := userBucket.Key
+
+		// The sub-aggregation history should have the number of tweets per year.
+		histogram, found := userBucket.DateHistogram("history")
+		if found {
+			for _, year := range histogram.Buckets {
+				fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
+			}
+		}
+	}
+}
+
+func ExampleSearchResult() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Do a search
+	searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do()
+	if err != nil {
+		panic(err)
+	}
+
+	// searchResult is of type SearchResult and returns hits, suggestions,
+	// and all kinds of other information from Elasticsearch.
+	fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+	// Each is a utility function that iterates over hits in a search result.
+	// It makes sure you don't need to check for nil values in the response.
+	// However, it ignores errors in serialization. If you want full control
+	// over iterating the hits, see below.
+	var ttyp Tweet
+	for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+		t := item.(Tweet)
+		fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+	}
+	fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+	// Here's how you iterate hits with full control.
+	if searchResult.Hits != nil {
+		fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+		// Iterate through results
+		for _, hit := range searchResult.Hits.Hits {
+			// hit.Index contains the name of the index
+
+			// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+			var t Tweet
+			err := json.Unmarshal(*hit.Source, &t)
+			if err != nil {
+				// Deserialization failed
+			}
+
+			// Work with tweet
+			fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+		}
+	} else {
+		// No hits
+		fmt.Print("Found no tweets\n")
+	}
+}
+
+func ExamplePutTemplateService() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Create search template
+	tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
+
+	// Create template
+	resp, err := client.PutTemplate().
+		Id("my-search-template"). // Name of the template
+		BodyString(tmpl).
// Search template itself + Do() // Execute + if err != nil { + panic(err) + } + if resp.Created { + fmt.Println("search template created") + } +} + +func ExampleGetTemplateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get template stored under "my-search-template" + resp, err := client.GetTemplate().Id("my-search-template").Do() + if err != nil { + panic(err) + } + fmt.Printf("search template is: %q\n", resp.Template) +} + +func ExampleDeleteTemplateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Delete template + resp, err := client.DeleteTemplate().Id("my-search-template").Do() + if err != nil { + panic(err) + } + if resp != nil && resp.Found { + fmt.Println("template deleted") + } +} + +func ExampleClusterHealthService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get cluster health + res, err := client.ClusterHealth().Index("twitter").Do() + if err != nil { + panic(err) + } + if res == nil { + panic(err) + } + fmt.Printf("Cluster status is %q\n", res.Status) +} + +func ExampleClusterHealthService_WaitForGreen() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Wait for status green + res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do() + if err != nil { + panic(err) + } + if res.TimedOut { + fmt.Printf("time out waiting for cluster status %q\n", "green") + } else { + fmt.Printf("cluster status is %q\n", res.Status) + } +} + +func ExampleClusterStateService() { + client, err := elastic.NewClient() + if err != nil { + panic(err) + } + + // Get cluster state + res, err := client.ClusterState().Metric("version").Do() + if err != nil { + panic(err) + } + fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/exists.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/exists.go new file mode 100644 index 000000000..7a42d53c9 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/exists.go @@ -0,0 +1,175 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ExistsService checks for the existence of a document using HEAD. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// for details. +type ExistsService struct { + client *Client + pretty bool + id string + index string + typ string + preference string + realtime *bool + refresh *bool + routing string + parent string +} + +// NewExistsService creates a new ExistsService. +func NewExistsService(client *Client) *ExistsService { + return &ExistsService{ + client: client, + } +} + +// Id is the document ID. +func (s *ExistsService) Id(id string) *ExistsService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *ExistsService) Index(index string) *ExistsService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *ExistsService) Type(typ string) *ExistsService { + s.typ = typ + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). 
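+//
+// For example (the "_local" value follows Elasticsearch's preference
+// parameter; index, type and id are invented):
+//
+//	exists, err := NewExistsService(client).
+//		Index("twitter").Type("tweet").Id("1").
+//		Preference("_local").
+//		Do()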
+func (s *ExistsService) Preference(preference string) *ExistsService { + s.preference = preference + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *ExistsService) Realtime(realtime bool) *ExistsService { + s.realtime = &realtime + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *ExistsService) Refresh(refresh bool) *ExistsService { + s.refresh = &refresh + return s +} + +// Routing is a specific routing value. +func (s *ExistsService) Routing(routing string) *ExistsService { + s.routing = routing + return s +} + +// Parent is the ID of the parent document. +func (s *ExistsService) Parent(parent string) *ExistsService { + s.parent = parent + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExistsService) Pretty(pretty bool) *ExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExistsService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExistsService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/exists_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/exists_test.go new file mode 100644 index 000000000..58a4fe707 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/exists_test.go @@ -0,0 +1,52 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestExists(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) + + exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do() + if err != nil { + t.Fatal(err) + } + if !exists { + t.Fatal("expected document to exist") + } +} + +func TestExistsValidate(t *testing.T) { + client := setupTestClient(t) + + // No index -> fail with error + res, err := NewExistsService(client).Type("tweet").Id("1").Do() + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != false { + t.Fatalf("expected result to be false; got: %v", res) + } + + // No type -> fail with error + res, err = NewExistsService(client).Index(testIndexName).Id("1").Do() + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != false { + t.Fatalf("expected result to be false; got: %v", res) + } + + // No id -> fail with error + res, err = NewExistsService(client).Index(testIndexName).Type("tweet").Do() + if err == nil { + t.Fatalf("expected Delete to fail without index name") + } + if res != false { + t.Fatalf("expected result to be false; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/explain.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/explain.go new file mode 100644 index 000000000..e922bc9b5 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/explain.go @@ -0,0 +1,330 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +var ( + _ = fmt.Print + _ = log.Print + _ = strings.Index + _ = uritemplates.Expand + _ = url.Parse +) + +// ExplainService computes a score explanation for a query and +// a specific document. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html. +type ExplainService struct { + client *Client + pretty bool + id string + index string + typ string + q string + routing string + lenient *bool + analyzer string + df string + fields []string + lowercaseExpandedTerms *bool + xSourceInclude []string + analyzeWildcard *bool + parent string + preference string + xSource []string + defaultOperator string + xSourceExclude []string + source string + bodyJson interface{} + bodyString string +} + +// NewExplainService creates a new ExplainService. +func NewExplainService(client *Client) *ExplainService { + return &ExplainService{ + client: client, + xSource: make([]string, 0), + xSourceExclude: make([]string, 0), + fields: make([]string, 0), + xSourceInclude: make([]string, 0), + } +} + +// Id is the document ID. +func (s *ExplainService) Id(id string) *ExplainService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *ExplainService) Index(index string) *ExplainService { + s.index = index + return s +} + +// Type is the type of the document. +func (s *ExplainService) Type(typ string) *ExplainService { + s.typ = typ + return s +} + +// Source is the URL-encoded query definition (instead of using the request body). 
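+//
+// A hypothetical example, passing the query via the source parameter
+// instead of a request body:
+//
+//	svc := NewExplainService(client).
+//		Index("twitter").Type("tweet").Id("1").
+//		Source(`{"query":{"term":{"user":"olivere"}}}`)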
+func (s *ExplainService) Source(source string) *ExplainService { + s.source = source + return s +} + +// XSourceExclude is a list of fields to exclude from the returned _source field. +func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. +func (s *ExplainService) Lenient(lenient bool) *ExplainService { + s.lenient = &lenient + return s +} + +// Query in the Lucene query string syntax. +func (s *ExplainService) Q(q string) *ExplainService { + s.q = q + return s +} + +// Routing sets a specific routing value. +func (s *ExplainService) Routing(routing string) *ExplainService { + s.routing = routing + return s +} + +// AnalyzeWildcard specifies whether wildcards and prefix queries +// in the query string query should be analyzed (default: false). +func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer is the analyzer for the query string query. +func (s *ExplainService) Analyzer(analyzer string) *ExplainService { + s.analyzer = analyzer + return s +} + +// Df is the default field for query string query (default: _all). +func (s *ExplainService) Df(df string) *ExplainService { + s.df = df + return s +} + +// Fields is a list of fields to return in the response. +func (s *ExplainService) Fields(fields ...string) *ExplainService { + s.fields = append(s.fields, fields...) + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// XSourceInclude is a list of fields to extract and return from the _source field. +func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// DefaultOperator is the default operator for query string query (AND or OR). +func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService { + s.defaultOperator = defaultOperator + return s +} + +// Parent is the ID of the parent document. +func (s *ExplainService) Parent(parent string) *ExplainService { + s.parent = parent + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *ExplainService) Preference(preference string) *ExplainService { + s.preference = preference + return s +} + +// XSource is true or false to return the _source field or not, or a list of fields to return. +func (s *ExplainService) XSource(xSource ...string) *ExplainService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExplainService) Pretty(pretty bool) *ExplainService { + s.pretty = pretty + return s +} + +// Query sets a query definition using the Query DSL. +func (s *ExplainService) Query(query Query) *ExplainService { + src, err := query.Source() + if err != nil { + // Do nothing in case of an error + return s + } + body := make(map[string]interface{}) + body["query"] = src + s.bodyJson = body + return s +} + +// BodyJson sets the query definition using the Query DSL. 
+func (s *ExplainService) BodyJson(body interface{}) *ExplainService { + s.bodyJson = body + return s +} + +// BodyString sets the query definition using the Query DSL as a string. +func (s *ExplainService) BodyString(body string) *ExplainService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *ExplainService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.source != "" { + params.Set("source", s.source) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.df != "" { + params.Set("df", s.df) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExplainService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExplainService) Do() (*ExplainResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ExplainResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ExplainResponse is the response of ExplainService.Do. 
+type ExplainResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Matched bool `json:"matched"` + Explanation map[string]interface{} `json:"explanation"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/explain_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/explain_test.go new file mode 100644 index 000000000..e799d6c52 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/explain_test.go @@ -0,0 +1,41 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestExplain(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Refresh(true). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // Explain + query := NewTermQuery("user", "olivere") + expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do() + if err != nil { + t.Fatal(err) + } + if expl == nil { + t.Fatal("expected to return an explanation") + } + if !expl.Matched { + t.Errorf("expected matched to be %v; got: %v", true, expl.Matched) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/fetch_source_context.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/fetch_source_context.go new file mode 100644 index 000000000..e13c9eb47 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/fetch_source_context.go @@ -0,0 +1,74 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/url" + "strings" +) + +type FetchSourceContext struct { + fetchSource bool + transformSource bool + includes []string + excludes []string +} + +func NewFetchSourceContext(fetchSource bool) *FetchSourceContext { + return &FetchSourceContext{ + fetchSource: fetchSource, + includes: make([]string, 0), + excludes: make([]string, 0), + } +} + +func (fsc *FetchSourceContext) FetchSource() bool { + return fsc.fetchSource +} + +func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) { + fsc.fetchSource = fetchSource +} + +func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext { + fsc.includes = append(fsc.includes, includes...) + return fsc +} + +func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext { + fsc.excludes = append(fsc.excludes, excludes...) + return fsc +} + +func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext { + fsc.transformSource = transformSource + return fsc +} + +func (fsc *FetchSourceContext) Source() (interface{}, error) { + if !fsc.fetchSource { + return false, nil + } + return map[string]interface{}{ + "includes": fsc.includes, + "excludes": fsc.excludes, + }, nil +} + +// Query returns the parameters in a form suitable for a URL query string. 
+func (fsc *FetchSourceContext) Query() url.Values { + params := url.Values{} + if !fsc.fetchSource { + params.Add("_source", "false") + return params + } + if len(fsc.includes) > 0 { + params.Add("_source_include", strings.Join(fsc.includes, ",")) + } + if len(fsc.excludes) > 0 { + params.Add("_source_exclude", strings.Join(fsc.excludes, ",")) + } + return params +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/fetch_source_context_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/fetch_source_context_test.go new file mode 100644 index 000000000..2bb683d69 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/fetch_source_context_test.go @@ -0,0 +1,125 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFetchSourceContextNoFetchSource(t *testing.T) { + builder := NewFetchSourceContext(false) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `false` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `false` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSource(t *testing.T) { + builder := NewFetchSourceContext(true) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":[],"includes":[]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSourceWithIncludesOnly(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":[],"includes":["a","b"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"excludes":["c"],"includes":["a","b"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFetchSourceContextQueryDefaults(t *testing.T) { + builder := NewFetchSourceContext(true) + values := builder.Query() + got := values.Encode() + expected := "" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} + +func TestFetchSourceContextQueryNoFetchSource(t *testing.T) { + builder := 
NewFetchSourceContext(false) + values := builder.Query() + got := values.Encode() + expected := "_source=false" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} + +func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) { + builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c") + values := builder.Query() + got := values.Encode() + expected := "_source_exclude=c&_source_include=a%2Cb" + if got != expected { + t.Errorf("expected %q; got: %q", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/geo_point.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/geo_point.go new file mode 100644 index 000000000..a09351ca2 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/geo_point.go @@ -0,0 +1,48 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strconv" + "strings" +) + +// GeoPoint is a geographic position described via latitude and longitude. +type GeoPoint struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` +} + +// Source returns the object to be serialized in Elasticsearch DSL. +func (pt *GeoPoint) Source() map[string]float64 { + return map[string]float64{ + "lat": pt.Lat, + "lon": pt.Lon, + } +} + +// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude. +func GeoPointFromLatLon(lat, lon float64) *GeoPoint { + return &GeoPoint{Lat: lat, Lon: lon} +} + +// GeoPointFromString initializes a new GeoPoint by a string that is +// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091". +func GeoPointFromString(latLon string) (*GeoPoint, error) { + latlon := strings.SplitN(latLon, ",", 2) + if len(latlon) != 2 { + return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon) + } + lat, err := strconv.ParseFloat(latlon[0], 64) + if err != nil { + return nil, err + } + lon, err := strconv.ParseFloat(latlon[1], 64) + if err != nil { + return nil, err + } + return &GeoPoint{Lat: lat, Lon: lon}, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/geo_point_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/geo_point_test.go new file mode 100644 index 000000000..ebc28c2ec --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/geo_point_test.go @@ -0,0 +1,24 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoPointSource(t *testing.T) { + pt := GeoPoint{Lat: 40, Lon: -70} + + data, err := json.Marshal(pt.Source()) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"lat":40,"lon":-70}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get.go new file mode 100644 index 000000000..eb2221755 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get.go @@ -0,0 +1,271 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// GetService allows to get a typed JSON document from the index based +// on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html +// for details. +type GetService struct { + client *Client + pretty bool + index string + typ string + id string + routing string + preference string + fields []string + refresh *bool + realtime *bool + fsc *FetchSourceContext + version interface{} + versionType string + parent string + ignoreErrorsOnGeneratedFields *bool +} + +// NewGetService creates a new GetService. +func NewGetService(client *Client) *GetService { + return &GetService{ + client: client, + typ: "_all", + } +} + +/* +// String returns a string representation of the GetService request. +func (s *GetService) String() string { + return fmt.Sprintf("[%v][%v][%v]: routing [%v]", + s.index, + s.typ, + s.id, + s.routing) +} +*/ + +// Index is the name of the index. +func (s *GetService) Index(index string) *GetService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *GetService) Type(typ string) *GetService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *GetService) Id(id string) *GetService { + s.id = id + return s +} + +// Parent is the ID of the parent document. +func (s *GetService) Parent(parent string) *GetService { + s.parent = parent + return s +} + +// Routing is the specific routing value. +func (s *GetService) Routing(routing string) *GetService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *GetService) Preference(preference string) *GetService { + s.preference = preference + return s +} + +// Fields is a list of fields to return in the response. +func (s *GetService) Fields(fields ...string) *GetService { + if s.fields == nil { + s.fields = make([]string, 0) + } + s.fields = append(s.fields, fields...) + return s +} + +func (s *GetService) FetchSource(fetchSource bool) *GetService { + if s.fsc == nil { + s.fsc = NewFetchSourceContext(fetchSource) + } else { + s.fsc.SetFetchSource(fetchSource) + } + return s +} + +func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService { + s.fsc = fetchSourceContext + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *GetService) Refresh(refresh bool) *GetService { + s.refresh = &refresh + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *GetService) Realtime(realtime bool) *GetService { + s.realtime = &realtime + return s +} + +// VersionType is the specific version type. +func (s *GetService) VersionType(versionType string) *GetService { + s.versionType = versionType + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetService) Version(version interface{}) *GetService { + s.version = version + return s +} + +// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that +// are generated if the transaction log is accessed. 
+func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService { + s.ignoreErrorsOnGeneratedFields = &ignore + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *GetService) Pretty(pretty bool) *GetService { + s.pretty = pretty + return s +} + +// Validate checks if the operation is valid. +func (s *GetService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// buildURL builds the URL for the operation. +func (s *GetService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.ignoreErrorsOnGeneratedFields != nil { + params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields)) + } + if s.fsc != nil { + for k, values := range s.fsc.Query() { + params.Add(k, strings.Join(values, ",")) + } + } + return path, params, nil +} + +// Do executes the operation. +func (s *GetService) Do() (*GetResult, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(GetResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a get request. + +// GetResult is the outcome of GetService.Do. 
+type GetResult struct { + Index string `json:"_index"` // index meta field + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // id meta field + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Timestamp int64 `json:"_timestamp"` // timestamp meta field + TTL int64 `json:"_ttl"` // ttl meta field + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Source *json.RawMessage `json:"_source,omitempty"` + Found bool `json:"found,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + //Error string `json:"error,omitempty"` // used only in MultiGet + // TODO double-check that MultiGet now returns details error information + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_template.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_template.go new file mode 100644 index 000000000..328d6e516 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_template.go @@ -0,0 +1,113 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// GetTemplateService reads a search template. +// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. +type GetTemplateService struct { + client *Client + pretty bool + id string + version interface{} + versionType string +} + +// NewGetTemplateService creates a new GetTemplateService. +func NewGetTemplateService(client *Client) *GetTemplateService { + return &GetTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *GetTemplateService) Id(id string) *GetTemplateService { + s.id = id + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetTemplateService) Version(version interface{}) *GetTemplateService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *GetTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *GetTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation and returns the template. 
+func (s *GetTemplateService) Do() (*GetTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(GetTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type GetTemplateResponse struct { + Template string `json:"template"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_template_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_template_test.go new file mode 100644 index 000000000..00aea6899 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_template_test.go @@ -0,0 +1,51 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestGetPutDeleteTemplate(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // This is a search template, not an index template! + tmpl := `{ + "template": { + "query" : { "term" : { "{{my_field}}" : "{{my_value}}" } }, + "size" : "{{my_size}}" + }, + "params":{ + "my_field" : "user", + "my_value" : "olivere", + "my_size" : 5 + } +}` + putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if putres == nil { + t.Fatalf("expected response; got: %v", putres) + } + if !putres.Created { + t.Fatalf("expected template to be created; got: %v", putres.Created) + } + + // Always delete template + defer client.DeleteTemplate().Id("elastic-template").Do() + + // Get template + getres, err := client.GetTemplate().Id("elastic-template").Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if getres == nil { + t.Fatalf("expected response; got: %v", getres) + } + if getres.Template == "" { + t.Errorf("expected template %q; got: %q", tmpl, getres.Template) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_test.go new file mode 100644 index 000000000..25dbe7391 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/get_test.go @@ -0,0 +1,165 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
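+
+// The tests below exercise GetService. A minimal usage sketch (the index,
+// type, and id values are illustrative):
+//
+//   res, err := client.Get().Index("twitter").Type("tweet").Id("1").Do()
+//   if err == nil && res.Found {
+//       // res.Source holds the raw JSON of the document
+//   }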
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGet(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + // Get document 1 + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source == nil { + t.Errorf("expected Source != nil; got %v", res.Source) + } + + // Get non existent document 99 + res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do() + if err == nil { + t.Fatalf("expected error; got: %v", err) + } + if !IsNotFound(err) { + t.Errorf("expected NotFound error; got: %v", err) + } + if res != nil { + t.Errorf("expected no response; got: %v", res) + } +} + +func TestGetWithSourceFiltering(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + // Get document 1, without source + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source != nil { + t.Errorf("expected Source == nil; got %v", res.Source) + } + + // Get document 1, exclude Message field + fsc := NewFetchSourceContext(true).Exclude("message") + res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got %v", res.Found) + } + if res.Source == nil { + t.Errorf("expected Source != nil; got %v", res.Source) + } + var tw tweet + err = json.Unmarshal(*res.Source, &tw) + if err != nil { + t.Fatal(err) + } + if tw.User != "olivere" { + t.Errorf("expected user %q; got: %q", "olivere", tw.User) + } + if tw.Message != "" { + t.Errorf("expected message %q; got: %q", "", tw.Message) + } +} + +func TestGetWithFields(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + // Get document 1, specifying fields + res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Fields("message").Do() + if err != nil { + t.Fatal(err) + } + if res.Found != true { + t.Errorf("expected Found = true; got: %v", res.Found) + } + + // We must NOT have the "user" field + _, ok := res.Fields["user"] + if ok { + t.Fatalf("expected no field %q in document", "user") + } + + // We must have the "message" field + messageField, ok := res.Fields["message"] + if !ok { + t.Fatalf("expected field %q in document", "message") + } + + // Depending on the version of elasticsearch the message field will be returned + // as a string or a slice of strings. This test works in both cases. 
+ + messageString, ok := messageField.(string) + if !ok { + messageArray, ok := messageField.([]interface{}) + if !ok { + t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField) + } else { + messageString, ok = messageArray[0].(string) + if !ok { + t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField) + } + } + } + + if messageString != tweet1.Message { + t.Errorf("expected message %q; got: %q", tweet1.Message, messageString) + } +} + +func TestGetValidate(t *testing.T) { + // Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name + client := setupTestClientAndCreateIndex(t) + + if _, err := client.Get().Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Index(testIndexName).Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Type("tweet").Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Id("1").Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Index(testIndexName).Type("tweet").Do(); err == nil { + t.Fatal("expected Get to fail") + } + if _, err := client.Get().Type("tweet").Id("1").Do(); err == nil { + t.Fatal("expected Get to fail") + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/highlight.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/highlight.go new file mode 100644 index 000000000..44501a731 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/highlight.go @@ -0,0 +1,455 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Highlight allows highlighting search results on one or more fields. +// For details, see: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html +type Highlight struct { + fields []*HighlighterField + tagsSchema *string + highlightFilter *bool + fragmentSize *int + numOfFragments *int + preTags []string + postTags []string + order *string + encoder *string + requireFieldMatch *bool + boundaryMaxScan *int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + phraseLimit *int + options map[string]interface{} + forceSource *bool + useExplicitFieldOrder bool +} + +func NewHighlight() *Highlight { + hl := &Highlight{ + fields: make([]*HighlighterField, 0), + preTags: make([]string, 0), + postTags: make([]string, 0), + boundaryChars: make([]rune, 0), + options: make(map[string]interface{}), + } + return hl +} + +func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight { + hl.fields = append(hl.fields, fields...) 
+ return hl
+}
+
+func (hl *Highlight) Field(name string) *Highlight {
+ field := NewHighlighterField(name)
+ hl.fields = append(hl.fields, field)
+ return hl
+}
+
+func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
+ hl.tagsSchema = &schemaName
+ return hl
+}
+
+func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
+ hl.highlightFilter = &highlightFilter
+ return hl
+}
+
+func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
+ hl.fragmentSize = &fragmentSize
+ return hl
+}
+
+func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
+ hl.numOfFragments = &numOfFragments
+ return hl
+}
+
+func (hl *Highlight) Encoder(encoder string) *Highlight {
+ hl.encoder = &encoder
+ return hl
+}
+
+func (hl *Highlight) PreTags(preTags ...string) *Highlight {
+ hl.preTags = append(hl.preTags, preTags...)
+ return hl
+}
+
+func (hl *Highlight) PostTags(postTags ...string) *Highlight {
+ hl.postTags = append(hl.postTags, postTags...)
+ return hl
+}
+
+func (hl *Highlight) Order(order string) *Highlight {
+ hl.order = &order
+ return hl
+}
+
+func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
+ hl.requireFieldMatch = &requireFieldMatch
+ return hl
+}
+
+func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
+ hl.boundaryMaxScan = &boundaryMaxScan
+ return hl
+}
+
+func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
+ hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
+ return hl
+}
+
+func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
+ hl.highlighterType = &highlighterType
+ return hl
+}
+
+func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
+ hl.fragmenter = &fragmenter
+ return hl
+}
+
+func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
+ hl.highlightQuery = highlightQuery
+ return hl
+}
+
+func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
+ hl.noMatchSize = &noMatchSize
+ return hl
+}
+
+func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
+ hl.options = options
+ return hl
+}
+
+func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
+ hl.forceSource = &forceSource
+ return hl
+}
+
+func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
+ hl.useExplicitFieldOrder = useExplicitFieldOrder
+ return hl
+}
+
+// Source creates the "highlight" section of the search request body.
+func (hl *Highlight) Source() (interface{}, error) {
+ // Returns the map inside of "highlight":
+ // "highlight":{
+ //   ... this ...
+ // } + source := make(map[string]interface{}) + if hl.tagsSchema != nil { + source["tags_schema"] = *hl.tagsSchema + } + if hl.preTags != nil && len(hl.preTags) > 0 { + source["pre_tags"] = hl.preTags + } + if hl.postTags != nil && len(hl.postTags) > 0 { + source["post_tags"] = hl.postTags + } + if hl.order != nil { + source["order"] = *hl.order + } + if hl.highlightFilter != nil { + source["highlight_filter"] = *hl.highlightFilter + } + if hl.fragmentSize != nil { + source["fragment_size"] = *hl.fragmentSize + } + if hl.numOfFragments != nil { + source["number_of_fragments"] = *hl.numOfFragments + } + if hl.encoder != nil { + source["encoder"] = *hl.encoder + } + if hl.requireFieldMatch != nil { + source["require_field_match"] = *hl.requireFieldMatch + } + if hl.boundaryMaxScan != nil { + source["boundary_max_scan"] = *hl.boundaryMaxScan + } + if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 { + source["boundary_chars"] = hl.boundaryChars + } + if hl.highlighterType != nil { + source["type"] = *hl.highlighterType + } + if hl.fragmenter != nil { + source["fragmenter"] = *hl.fragmenter + } + if hl.highlightQuery != nil { + src, err := hl.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if hl.noMatchSize != nil { + source["no_match_size"] = *hl.noMatchSize + } + if hl.phraseLimit != nil { + source["phrase_limit"] = *hl.phraseLimit + } + if hl.options != nil && len(hl.options) > 0 { + source["options"] = hl.options + } + if hl.forceSource != nil { + source["force_source"] = *hl.forceSource + } + + if hl.fields != nil && len(hl.fields) > 0 { + if hl.useExplicitFieldOrder { + // Use a slice for the fields + fields := make([]map[string]interface{}, 0) + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fmap := make(map[string]interface{}) + fmap[field.Name] = src + fields = append(fields, fmap) + } + source["fields"] = fields + } else { + // Use a map for the fields + fields := make(map[string]interface{}, 0) + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fields[field.Name] = src + } + source["fields"] = fields + } + } + + return source, nil +} + +// HighlighterField specifies a highlighted field. 
+type HighlighterField struct { + Name string + + preTags []string + postTags []string + fragmentSize int + fragmentOffset int + numOfFragments int + highlightFilter *bool + order *string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + matchedFields []string + phraseLimit *int + options map[string]interface{} + forceSource *bool + + /* + Name string + preTags []string + postTags []string + fragmentSize int + numOfFragments int + fragmentOffset int + highlightFilter *bool + order string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType string + fragmenter string + highlightQuery Query + noMatchSize *int + matchedFields []string + options map[string]interface{} + forceSource *bool + */ +} + +func NewHighlighterField(name string) *HighlighterField { + return &HighlighterField{ + Name: name, + preTags: make([]string, 0), + postTags: make([]string, 0), + fragmentSize: -1, + fragmentOffset: -1, + numOfFragments: -1, + boundaryMaxScan: -1, + boundaryChars: make([]rune, 0), + matchedFields: make([]string, 0), + options: make(map[string]interface{}), + } +} + +func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField { + f.preTags = append(f.preTags, preTags...) + return f +} + +func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField { + f.postTags = append(f.postTags, postTags...) + return f +} + +func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField { + f.fragmentSize = fragmentSize + return f +} + +func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField { + f.fragmentOffset = fragmentOffset + return f +} + +func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField { + f.numOfFragments = numOfFragments + return f +} + +func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField { + f.highlightFilter = &highlightFilter + return f +} + +func (f *HighlighterField) Order(order string) *HighlighterField { + f.order = &order + return f +} + +func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField { + f.requireFieldMatch = &requireFieldMatch + return f +} + +func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField { + f.boundaryMaxScan = boundaryMaxScan + return f +} + +func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField { + f.boundaryChars = append(f.boundaryChars, boundaryChars...) + return f +} + +func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField { + f.highlighterType = &highlighterType + return f +} + +func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField { + f.fragmenter = &fragmenter + return f +} + +func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField { + f.highlightQuery = highlightQuery + return f +} + +func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField { + f.noMatchSize = &noMatchSize + return f +} + +func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField { + f.options = options + return f +} + +func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField { + f.matchedFields = append(f.matchedFields, matchedFields...) 
+ return f +} + +func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField { + f.phraseLimit = &phraseLimit + return f +} + +func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField { + f.forceSource = &forceSource + return f +} + +func (f *HighlighterField) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if f.preTags != nil && len(f.preTags) > 0 { + source["pre_tags"] = f.preTags + } + if f.postTags != nil && len(f.postTags) > 0 { + source["post_tags"] = f.postTags + } + if f.fragmentSize != -1 { + source["fragment_size"] = f.fragmentSize + } + if f.numOfFragments != -1 { + source["number_of_fragments"] = f.numOfFragments + } + if f.fragmentOffset != -1 { + source["fragment_offset"] = f.fragmentOffset + } + if f.highlightFilter != nil { + source["highlight_filter"] = *f.highlightFilter + } + if f.order != nil { + source["order"] = *f.order + } + if f.requireFieldMatch != nil { + source["require_field_match"] = *f.requireFieldMatch + } + if f.boundaryMaxScan != -1 { + source["boundary_max_scan"] = f.boundaryMaxScan + } + if f.boundaryChars != nil && len(f.boundaryChars) > 0 { + source["boundary_chars"] = f.boundaryChars + } + if f.highlighterType != nil { + source["type"] = *f.highlighterType + } + if f.fragmenter != nil { + source["fragmenter"] = *f.fragmenter + } + if f.highlightQuery != nil { + src, err := f.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if f.noMatchSize != nil { + source["no_match_size"] = *f.noMatchSize + } + if f.matchedFields != nil && len(f.matchedFields) > 0 { + source["matched_fields"] = f.matchedFields + } + if f.phraseLimit != nil { + source["phrase_limit"] = *f.phraseLimit + } + if f.options != nil && len(f.options) > 0 { + source["options"] = f.options + } + if f.forceSource != nil { + source["force_source"] = *f.forceSource + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/highlight_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/highlight_test.go new file mode 100644 index 000000000..be5cd963e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/highlight_test.go @@ -0,0 +1,192 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
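+
+// The tests below exercise the Highlight builder. A minimal usage sketch
+// (the field name and tags are illustrative):
+//
+//   hl := NewHighlight().Field("message").PreTags("<em>").PostTags("</em>")
+//   // attach hl to a search via Search().Highlight(hl); fragments come
+//   // back per field in SearchHit.Highlight["message"]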
+ +package elastic + +import ( + "encoding/json" + _ "net/http" + "testing" +) + +func TestHighlighterField(t *testing.T) { + field := NewHighlighterField("grade") + src, err := field.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlighterFieldWithOptions(t *testing.T) { + field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1) + src, err := field.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fragment_size":2,"number_of_fragments":1}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithStringField(t *testing.T) { + builder := NewHighlight().Field("grade") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithFields(t *testing.T) { + gradeField := NewHighlighterField("grade") + builder := NewHighlight().Fields(gradeField) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithMultipleFields(t *testing.T) { + gradeField := NewHighlighterField("grade") + colorField := NewHighlighterField("color") + builder := NewHighlight().Fields(gradeField, colorField) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":{"color":{},"grade":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlighterWithExplicitFieldOrder(t *testing.T) { + gradeField := NewHighlighterField("grade").FragmentSize(2) + colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1) + builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHighlightWithTermQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = 
client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Specify highlighter
+ hl := NewHighlight()
+ hl = hl.Fields(NewHighlighterField("message"))
+ hl = hl.PreTags("<em>").PostTags("</em>")
+
+ // The prefix query should match exactly one document
+ query := NewPrefixQuery("message", "golang")
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Highlight(hl).
+ Query(query).
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Fatalf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 1 {
+ t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ }
+
+ hit := searchResult.Hits.Hits[0]
+ var tw tweet
+ if err := json.Unmarshal(*hit.Source, &tw); err != nil {
+ t.Fatal(err)
+ }
+ if hit.Highlight == nil || len(hit.Highlight) == 0 {
+ t.Fatal("expected hit to have a highlight; got nil")
+ }
+ if hl, found := hit.Highlight["message"]; found {
+ if len(hl) != 1 {
+ t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
+ }
+ expected := "Welcome to <em>Golang</em> and Elasticsearch."
+ if hl[0] != expected {
+ t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
+ }
+ } else {
+ t.Fatal("expected to have a highlight on field \"message\"; got none")
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/index.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/index.go
new file mode 100644
index 000000000..bdaba0560
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/index.go
@@ -0,0 +1,284 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndexService adds or updates a typed JSON document in a specified index,
+// making it searchable.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
+// for details.
+type IndexService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ parent string
+ replication string
+ routing string
+ timeout string
+ timestamp string
+ ttl string
+ version interface{}
+ opType string
+ versionType string
+ refresh *bool
+ consistency string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewIndexService creates a new IndexService.
+func NewIndexService(client *Client) *IndexService {
+ return &IndexService{
+ client: client,
+ }
+}
+
+// Id is the document ID.
+func (s *IndexService) Id(id string) *IndexService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *IndexService) Index(index string) *IndexService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document.
+func (s *IndexService) Type(typ string) *IndexService {
+ s.typ = typ
+ return s
+}
+
+// Consistency is an explicit write consistency setting for the operation.
+func (s *IndexService) Consistency(consistency string) *IndexService { + s.consistency = consistency + return s +} + +// Refresh the index after performing the operation. +func (s *IndexService) Refresh(refresh bool) *IndexService { + s.refresh = &refresh + return s +} + +// Ttl is an expiration time for the document. +func (s *IndexService) Ttl(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// TTL is an expiration time for the document (alias for Ttl). +func (s *IndexService) TTL(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// Version is an explicit version number for concurrency control. +func (s *IndexService) Version(version interface{}) *IndexService { + s.version = version + return s +} + +// OpType is an explicit operation type, i.e. "create" or "index" (default). +func (s *IndexService) OpType(opType string) *IndexService { + s.opType = opType + return s +} + +// Parent is the ID of the parent document. +func (s *IndexService) Parent(parent string) *IndexService { + s.parent = parent + return s +} + +// Replication is a specific replication type. +func (s *IndexService) Replication(replication string) *IndexService { + s.replication = replication + return s +} + +// Routing is a specific routing value. +func (s *IndexService) Routing(routing string) *IndexService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndexService) Timeout(timeout string) *IndexService { + s.timeout = timeout + return s +} + +// Timestamp is an explicit timestamp for the document. +func (s *IndexService) Timestamp(timestamp string) *IndexService { + s.timestamp = timestamp + return s +} + +// VersionType is a specific version type. +func (s *IndexService) VersionType(versionType string) *IndexService { + s.versionType = versionType + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndexService) Pretty(pretty bool) *IndexService { + s.pretty = pretty + return s +} + +// BodyJson is the document as a serializable JSON interface. +func (s *IndexService) BodyJson(body interface{}) *IndexService { + s.bodyJson = body + return s +} + +// BodyString is the document encoded as a string. +func (s *IndexService) BodyString(body string) *IndexService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndexService) buildURL() (string, string, url.Values, error) { + var err error + var method, path string + + if s.id != "" { + // Create document with manual id + method = "PUT" + path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + } else { + // Automatic ID generation + // See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation + method = "POST" + path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } + if err != nil { + return "", "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.consistency != "" { + params.Set("consistency", s.consistency) + } + if s.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.replication != "" { + params.Set("replication", s.replication) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.timestamp != "" { + params.Set("timestamp", s.timestamp) + } + if s.ttl != "" { + params.Set("ttl", s.ttl) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return method, path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndexService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndexService) Do() (*IndexResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + method, path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(method, path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndexResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndexResponse is the result of indexing a document in Elasticsearch. +type IndexResponse struct { + // TODO _shards { total, failed, successful } + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/index_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/index_test.go new file mode 100644 index 000000000..01722b3e3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/index_test.go @@ -0,0 +1,279 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
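+
+// The tests below exercise IndexService. A minimal usage sketch (the index,
+// type, id, and body values are illustrative):
+//
+//   doc := map[string]interface{}{"user": "olivere", "message": "Hello"}
+//   res, err := client.Index().Index("twitter").Type("tweet").Id("1").BodyJson(doc).Do()
+//   // res.Created reports whether a new document was created (vs. updated)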
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestIndexLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // Exists + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("expected exists %v; got %v", true, exists) + } + + // Get document + getResult, err := client.Get(). + Index(testIndexName). + Type("tweet"). + Id("1"). + Do() + if err != nil { + t.Fatal(err) + } + if getResult.Index != testIndexName { + t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index) + } + if getResult.Type != "tweet" { + t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type) + } + if getResult.Id != "1" { + t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id) + } + if getResult.Source == nil { + t.Errorf("expected GetResult.Source to be != nil; got nil") + } + + // Decode the Source field + var tweetGot tweet + err = json.Unmarshal(*getResult.Source, &tweetGot) + if err != nil { + t.Fatal(err) + } + if tweetGot.User != tweet1.User { + t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User) + } + if tweetGot.Message != tweet1.Message { + t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message) + } + + // Delete document again + deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if deleteResult == nil { + t.Errorf("expected result to be != nil; got: %v", deleteResult) + } + + // Exists + exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() + if err != nil { + t.Fatal(err) + } + if exists { + t.Errorf("expected exists %v; got %v", false, exists) + } +} + +func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + BodyJson(&tweet1). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + if indexResult.Id == "" { + t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id) + } + id := indexResult.Id + + // Exists + exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do() + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("expected exists %v; got %v", true, exists) + } + + // Get document + getResult, err := client.Get(). + Index(testIndexName). + Type("tweet"). + Id(id). 
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if getResult.Index != testIndexName {
+		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+	}
+	if getResult.Type != "tweet" {
+		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+	}
+	if getResult.Id != id {
+		t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
+	}
+	if getResult.Source == nil {
+		t.Errorf("expected GetResult.Source to be != nil; got nil")
+	}
+
+	// Decode the Source field
+	var tweetGot tweet
+	err = json.Unmarshal(*getResult.Source, &tweetGot)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tweetGot.User != tweet1.User {
+		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+	}
+	if tweetGot.Message != tweet1.Message {
+		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+	}
+
+	// Delete document again
+	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if deleteResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", deleteResult)
+	}
+
+	// Exists
+	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("expected exists %v; got %v", false, exists)
+	}
+}
+
+func TestIndexValidate(t *testing.T) {
+	client := setupTestClient(t)
+
+	tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// No index name -> fail with error
+	res, err := NewIndexService(client).Type("tweet").Id("1").BodyJson(&tweet).Do()
+	if err == nil {
+		t.Fatalf("expected Index to fail without index name")
+	}
+	if res != nil {
+		t.Fatalf("expected result to be == nil; got: %v", res)
+	}
+
+	// No type -> fail with error
+	res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do()
+	if err == nil {
+		t.Fatalf("expected Index to fail without type")
+	}
+	if res != nil {
+		t.Fatalf("expected result to be == nil; got: %v", res)
+	}
+}
+
+func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
+	// TODO: Find out how to make these tests robust
+	t.Skip("test fails regularly with 409 (Conflict): " +
+		"IndexPrimaryShardNotAllocatedException[[elastic-test] " +
+		"primary not allocated post api... 
skipping") + + client := setupTestClient(t) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Fatalf("expected response; got: %v", createIndex) + } + if !createIndex.Acknowledged { + t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged) + } + + // Exists + indexExists, err := client.IndexExists(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !indexExists { + t.Fatalf("expected index exists=%v; got %v", true, indexExists) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Close index + closeIndex, err := client.CloseIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if closeIndex == nil { + t.Fatalf("expected response; got: %v", closeIndex) + } + if !closeIndex.Acknowledged { + t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged) + } + + // Open index + openIndex, err := client.OpenIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if openIndex == nil { + t.Fatalf("expected response; got: %v", openIndex) + } + if !openIndex.Acknowledged { + t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if deleteIndex == nil { + t.Fatalf("expected response; got: %v", deleteIndex) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_close.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_close.go new file mode 100644 index 000000000..ad344cb26 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_close.go @@ -0,0 +1,153 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesCloseService closes an index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html +// for details. +type IndicesCloseService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesCloseService creates and initializes a new IndicesCloseService. +func NewIndicesCloseService(client *Client) *IndicesCloseService { + return &IndicesCloseService{client: client} +} + +// Index is the name of the index to close. +func (s *IndicesCloseService) Index(index string) *IndicesCloseService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. 
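+//
+// For example (a hypothetical value, not taken from this patch),
+// MasterTimeout("30s") allows the master node up to 30 seconds to
+// acknowledge the close operation.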
+func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). +func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesCloseService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_close", map[string]string{ + "index": s.index, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesCloseService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesCloseService) Do() (*IndicesCloseResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesCloseResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesCloseResponse is the response of IndicesCloseService.Do. +type IndicesCloseResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_close_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_close_test.go new file mode 100644 index 000000000..7293bb1c4 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_close_test.go @@ -0,0 +1,81 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+// TODO(oe): Find out why this test fails on Travis CI.
+/*
+func TestIndicesOpenAndClose(t *testing.T) {
+	client := setupTestClient(t)
+
+	// Create index
+	createIndex, err := client.CreateIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !createIndex.Acknowledged {
+		t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+	}
+	defer func() {
+		// Delete index
+		deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !deleteIndex.Acknowledged {
+			t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+		}
+	}()
+
+	waitForYellow := func() {
+		// Wait for status yellow
+		res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res != nil && res.TimedOut {
+			t.Fatalf("cluster time out waiting for status %q", "yellow")
+		}
+	}
+
+	// Wait for cluster
+	waitForYellow()
+
+	// Close index
+	cresp, err := client.CloseIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !cresp.Acknowledged {
+		t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
+	}
+
+	// Wait for cluster
+	waitForYellow()
+
+	// Open index again
+	oresp, err := client.OpenIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !oresp.Acknowledged {
+		t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
+	}
+}
+*/
+
+func TestIndicesCloseValidate(t *testing.T) {
+	client := setupTestClient(t)
+
+	// No index name -> fail with error
+	res, err := NewIndicesCloseService(client).Do()
+	if err == nil {
+		t.Fatalf("expected IndicesClose to fail without index name")
+	}
+	if res != nil {
+		t.Fatalf("expected result to be == nil; got: %v", res)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_create.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_create.go
new file mode 100644
index 000000000..1e98447ea
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_create.go
@@ -0,0 +1,129 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"errors"
+	"net/url"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesCreateService creates a new index.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
+// for details.
+type IndicesCreateService struct {
+	client        *Client
+	pretty        bool
+	index         string
+	timeout       string
+	masterTimeout string
+	bodyJson      interface{}
+	bodyString    string
+}
+
+// NewIndicesCreateService returns a new IndicesCreateService.
+func NewIndicesCreateService(client *Client) *IndicesCreateService {
+	return &IndicesCreateService{client: client}
+}
+
+// Index is the name of the index to create.
+func (b *IndicesCreateService) Index(index string) *IndicesCreateService {
+	b.index = index
+	return b
+}
+
+// Timeout sets the explicit operation timeout, e.g. "5s".
+func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Body specifies the configuration of the index as a string.
+// It is an alias for BodyString.
+func (b *IndicesCreateService) Body(body string) *IndicesCreateService {
+	b.bodyString = body
+	return b
+}
+
+// BodyString specifies the configuration of the index as a string.
+func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService {
+	b.bodyString = body
+	return b
+}
+
+// BodyJson specifies the configuration of the index. The interface{} will
+// be serialized as a JSON document, so use a map[string]interface{}.
+func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
+	b.bodyJson = body
+	return b
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
+	b.pretty = pretty
+	return b
+}
+
+// Do executes the operation.
+func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) {
+	if b.index == "" {
+		return nil, errors.New("missing index name")
+	}
+
+	// Build URL
+	path, err := uritemplates.Expand("/{index}", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	params := make(url.Values)
+	if b.pretty {
+		params.Set("pretty", "1")
+	}
+	if b.masterTimeout != "" {
+		params.Set("master_timeout", b.masterTimeout)
+	}
+	if b.timeout != "" {
+		params.Set("timeout", b.timeout)
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if b.bodyJson != nil {
+		body = b.bodyJson
+	} else {
+		body = b.bodyString
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	ret := new(IndicesCreateResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a create index request.
+
+// IndicesCreateResult is the outcome of creating a new index.
+type IndicesCreateResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_create_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_create_test.go
new file mode 100644
index 000000000..b3723950a
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_create_test.go
@@ -0,0 +1,60 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
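+
+// A minimal usage sketch for the create service above (hypothetical index
+// name and settings, not taken from this patch; assumes a configured *Client
+// named client, as in the tests below):
+//
+//	body := `{"settings":{"number_of_shards":1,"number_of_replicas":0}}`
+//	res, err := client.CreateIndex("twitter").Body(body).Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.Acknowledged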
+ +package elastic + +import "testing" + +func TestIndicesLifecycle(t *testing.T) { + client := setupTestClient(t) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !createIndex.Acknowledged { + t.Errorf("expected IndicesCreateResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) + } + + // Check if index exists + indexExists, err := client.IndexExists(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !indexExists { + t.Fatalf("index %s should exist, but doesn't\n", testIndexName) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) + } + + // Check if index exists + indexExists, err = client.IndexExists(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if indexExists { + t.Fatalf("index %s should not exist, but does\n", testIndexName) + } +} + +func TestIndicesCreateValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesCreateService(client).Body(testMapping).Do() + if err == nil { + t.Fatalf("expected IndicesCreate to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete.go new file mode 100644 index 000000000..e2582dc6f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteService allows to delete existing indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html +// for details. +type IndicesDeleteService struct { + client *Client + pretty bool + index []string + timeout string + masterTimeout string +} + +// NewIndicesDeleteService creates and initializes a new IndicesDeleteService. +func NewIndicesDeleteService(client *Client) *IndicesDeleteService { + return &IndicesDeleteService{ + client: client, + index: make([]string, 0), + } +} + +// Index adds the list of indices to delete. +// Use `_all` or `*` string to delete all indices. +func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
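+//
+// As a rough illustration (index names are hypothetical),
+// Index([]string{"index1", "index2"}) yields the path /index1%2Cindex2:
+// the indices are comma-joined and then URL-escaped by uritemplates.Expand.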
+func (s *IndicesDeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteService) Do() (*IndicesDeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete index request. + +// IndicesDeleteResponse is the response of IndicesDeleteService.Do. +type IndicesDeleteResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_template.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_template.go new file mode 100644 index 000000000..2c62a06cd --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_template.go @@ -0,0 +1,122 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteTemplateService deletes index templates. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. +type IndicesDeleteTemplateService struct { + client *Client + pretty bool + name string + timeout string + masterTimeout string +} + +// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService. +func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService { + return &IndicesDeleteTemplateService{ + client: client, + } +} + +// Name is the name of the template. +func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
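+//
+// When enabled, buildURL below adds pretty=1 to the query string.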
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do. +type IndicesDeleteTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_test.go new file mode 100644 index 000000000..d84edb8de --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_test.go @@ -0,0 +1,20 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIndicesDeleteValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesDeleteService(client).Do() + if err == nil { + t.Fatalf("expected IndicesDelete to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go new file mode 100644 index 000000000..79aa4c2d5 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_warmer.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesDeleteWarmerService allows to delete a warmer. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. 
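+//
+// A minimal usage sketch (hypothetical index and warmer names; assumes a
+// configured *Client named client, as in the tests below):
+//
+//	resp, err := client.DeleteWarmer().Index("twitter").Name("warmer_1").Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.Acknowledged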
+type IndicesDeleteWarmerService struct {
+	client        *Client
+	pretty        bool
+	index         []string
+	name          []string
+	masterTimeout string
+}
+
+// NewIndicesDeleteWarmerService creates a new IndicesDeleteWarmerService.
+func NewIndicesDeleteWarmerService(client *Client) *IndicesDeleteWarmerService {
+	return &IndicesDeleteWarmerService{
+		client: client,
+		index:  make([]string, 0),
+		name:   make([]string, 0),
+	}
+}
+
+// Index is a list of index names to delete warmers from (supports wildcards);
+// use `_all` to perform the operation on all indices.
+func (s *IndicesDeleteWarmerService) Index(indices ...string) *IndicesDeleteWarmerService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Name is a list of warmer names to delete (supports wildcards);
+// use `_all` to delete all warmers in the specified indices.
+func (s *IndicesDeleteWarmerService) Name(name ...string) *IndicesDeleteWarmerService {
+	s.name = append(s.name, name...)
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesDeleteWarmerService) MasterTimeout(masterTimeout string) *IndicesDeleteWarmerService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesDeleteWarmerService) Pretty(pretty bool) *IndicesDeleteWarmerService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesDeleteWarmerService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{
+		"index": strings.Join(s.index, ","),
+		"name":  strings.Join(s.name, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if len(s.name) > 0 {
+		params.Set("name", strings.Join(s.name, ","))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesDeleteWarmerService) Validate() error {
+	var invalid []string
+	if len(s.index) == 0 {
+		invalid = append(invalid, "Index")
+	}
+	if len(s.name) == 0 {
+		invalid = append(invalid, "Name")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesDeleteWarmerService) Do() (*DeleteWarmerResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(DeleteWarmerResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// DeleteWarmerResponse is the response of IndicesDeleteWarmerService.Do.
+type DeleteWarmerResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_warmer_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_warmer_test.go new file mode 100644 index 000000000..3d811ea59 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_delete_warmer_test.go @@ -0,0 +1,48 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestDeleteWarmerBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Names []string + Expected string + }{ + { + []string{"test"}, + []string{"warmer_1"}, + "/test/_warmer/warmer_1", + }, + { + []string{"*"}, + []string{"warmer_1"}, + "/%2A/_warmer/warmer_1", + }, + { + []string{"_all"}, + []string{"warmer_1"}, + "/_all/_warmer/warmer_1", + }, + { + []string{"index-1", "index-2"}, + []string{"warmer_1", "warmer_2"}, + "/index-1%2Cindex-2/_warmer/warmer_1%2Cwarmer_2", + }, + } + + for _, test := range tests { + path, _, err := client.DeleteWarmer().Index(test.Indices...).Name(test.Names...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists.go new file mode 100644 index 000000000..92f9974f2 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists.go @@ -0,0 +1,149 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsService checks if an index or indices exist or not. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html +// for details. +type IndicesExistsService struct { + client *Client + pretty bool + index []string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + local *bool +} + +// NewIndicesExistsService creates and initializes a new IndicesExistsService. +func NewIndicesExistsService(client *Client) *IndicesExistsService { + return &IndicesExistsService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of one or more indices to check. +func (s *IndicesExistsService) Index(index []string) *IndicesExistsService { + s.index = index + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. (This includes `_all` string or +// when no indices have been specified). +func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. 
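+//
+// For example (a hypothetical call, not taken from this patch),
+// ExpandWildcards("open") limits the expansion to open indices, while
+// "closed" targets closed ones.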
+func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService { + s.expandWildcards = expandWildcards + return s +} + +// Local, when set, returns local information and does not retrieve the state +// from master node (default: false). +func (s *IndicesExistsService) Local(local bool) *IndicesExistsService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesExistsService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_template.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_template.go new file mode 100644 index 000000000..7587a8786 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_template.go @@ -0,0 +1,112 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsTemplateService checks if a given template exists. +// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists +// for documentation. 
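+//
+// A minimal usage sketch (hypothetical template name; assumes a configured
+// *Client named client, as in the tests below):
+//
+//	exists, err := client.IndexTemplateExists("elastic-template").Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = exists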
+type IndicesExistsTemplateService struct { + client *Client + pretty bool + name string + local *bool +} + +// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService. +func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService { + return &IndicesExistsTemplateService{ + client: client, + } +} + +// Name is the name of the template. +func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService { + s.name = name + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesExistsTemplateService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_template_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_template_test.go new file mode 100644 index 000000000..32fb82ad3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_template_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestIndexExistsTemplate(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tmpl := `{ + "template":"elastic-test*", + "settings":{ + "number_of_shards":1, + "number_of_replicas":0 + }, + "mappings":{ + "tweet":{ + "properties":{ + "tags":{ + "type":"string" + }, + "location":{ + "type":"geo_point" + }, + "suggest_field":{ + "type":"completion", + "payloads":true + } + } + } + } +}` + putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if putres == nil { + t.Fatalf("expected response; got: %v", putres) + } + if !putres.Acknowledged { + t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged) + } + + // Always delete template + defer client.IndexDeleteTemplate("elastic-template").Do() + + // Check if template exists + exists, err := client.IndexTemplateExists("elastic-template").Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if !exists { + t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists) + } + + // Get template + getres, err := client.IndexGetTemplate("elastic-template").Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if getres == nil { + t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_test.go new file mode 100644 index 000000000..8cb6f5fab --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_test.go @@ -0,0 +1,20 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestIndicesExistsWithoutIndex(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesExistsService(client).Do() + if err == nil { + t.Fatalf("expected IndicesExists to fail without index name") + } + if res != false { + t.Fatalf("expected result to be false; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_type.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_type.go new file mode 100644 index 000000000..631f773fe --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_type.go @@ -0,0 +1,161 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesExistsTypeService checks if one or more types exist in one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html +// for details. +type IndicesExistsTypeService struct { + client *Client + pretty bool + typ []string + index []string + expandWildcards string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool +} + +// NewIndicesExistsTypeService creates a new IndicesExistsTypeService. 
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService { + return &IndicesExistsTypeService{ + client: client, + index: make([]string, 0), + typ: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` to check the types across all indices. +func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types to check. +func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService { + s.typ = append(s.typ, types...) + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService { + s.expandWildcards = expandWildcards + return s +} + +// Local specifies whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTypeService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(s.typ) == 0 { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
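+//
+// A minimal usage sketch (hypothetical index/type names; assumes a
+// configured *Client named client, as in the tests below):
+//
+//	ok, err := client.TypeExists().Index("twitter").Type("tweet").Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = ok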
+func (s *IndicesExistsTypeService) Do() (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_type_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_type_test.go new file mode 100644 index 000000000..51721b125 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_exists_type_test.go @@ -0,0 +1,134 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestIndicesExistsTypeBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Expected string + ExpectValidateFailure bool + }{ + { + []string{}, + []string{}, + "", + true, + }, + { + []string{"index1"}, + []string{}, + "", + true, + }, + { + []string{}, + []string{"type1"}, + "", + true, + }, + { + []string{"index1"}, + []string{"type1"}, + "/index1/type1", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1"}, + "/index1%2Cindex2/type1", + false, + }, + { + []string{"index1", "index2"}, + []string{"type1", "type2"}, + "/index1%2Cindex2/type1%2Ctype2", + false, + }, + } + + for i, test := range tests { + err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate() + if err == nil && test.ExpectValidateFailure { + t.Errorf("case #%d: expected validate to fail", i+1) + continue + } + if err != nil && !test.ExpectValidateFailure { + t.Errorf("case #%d: expected validate to succeed", i+1) + continue + } + if !test.ExpectValidateFailure { + path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL() + if err != nil { + t.Fatalf("case #%d: %v", i+1, err) + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } + } +} + +func TestIndicesExistsType(t *testing.T) { + client := setupTestClient(t) + + // Create index with tweet type + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex) + } + if !createIndex.Acknowledged { + t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) + } + + // Check if type exists + exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do() + if err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName) + } + + // Delete index + deleteIndex, err := client.DeleteIndex(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if !deleteIndex.Acknowledged { + t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) + } + + 
// Check if type exists
+	exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName)
+	}
+}
+
+func TestIndicesExistsTypeValidate(t *testing.T) {
+	client := setupTestClient(t)
+
+	// No index name -> fail with error
+	res, err := NewIndicesExistsTypeService(client).Do()
+	if err == nil {
+		t.Fatalf("expected IndicesExistsType to fail without index name")
+	}
+	if res != false {
+		t.Fatalf("expected result to be false; got: %v", res)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_flush.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_flush.go
new file mode 100644
index 000000000..3d101f9bd
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_flush.go
@@ -0,0 +1,169 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// IndicesFlushService allows to flush one or more indices. The flush
+// process of an index basically frees memory from the index by flushing
+// data to the index storage and clearing the internal transaction log.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// for details.
+type IndicesFlushService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	force             *bool
+	waitIfOngoing     *bool
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+}
+
+// NewIndicesFlushService creates a new IndicesFlushService.
+func NewIndicesFlushService(client *Client) *IndicesFlushService {
+	return &IndicesFlushService{
+		client: client,
+		index:  make([]string, 0),
+	}
+}
+
+// Index is a list of index names; use `_all` or empty string for all indices.
+func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService {
+	s.index = append(s.index, indices...)
+	return s
+}
+
+// Force indicates whether a flush should be forced even if it is not
+// necessarily needed, i.e. if no changes will be committed to the index.
+// This is useful if transaction log IDs should be incremented even if
+// no uncommitted changes are present. (This setting can be considered internal.)
+func (s *IndicesFlushService) Force(force bool) *IndicesFlushService {
+	s.force = &force
+	return s
+}
+
+// WaitIfOngoing, if set to true, indicates that the flush operation will
+// block until the flush can be executed if another flush operation is
+// already executing. The default is false and will cause an exception
+// to be thrown on the shard level if another flush operation is already running.
+func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService {
+	s.waitIfOngoing = &waitIfOngoing
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices. (This includes `_all` string or when
+// no indices have been specified).
+func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards specifies whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesFlushService) buildURL() (string, url.Values, error) {
+	// Build URL
+	var err error
+	var path string
+
+	if len(s.index) > 0 {
+		path, err = uritemplates.Expand("/{index}/_flush", map[string]string{
+			"index": strings.Join(s.index, ","),
+		})
+	} else {
+		path = "/_flush"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.waitIfOngoing != nil {
+		params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesFlushService) Validate() error {
+	return nil
+}
+
+// Do executes the service.
+func (s *IndicesFlushService) Do() (*IndicesFlushResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(IndicesFlushResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a flush request.
+
+// IndicesFlushResponse is the response of IndicesFlushService.Do.
+type IndicesFlushResponse struct {
+	Shards shardsInfo `json:"_shards"`
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_flush_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_flush_test.go
new file mode 100644
index 000000000..4e30a000b
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_flush_test.go
@@ -0,0 +1,69 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
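+
+// A minimal usage sketch for the flush service above (hypothetical index
+// name; assumes a configured *Client named client, as in the tests below):
+//
+//	res, err := client.Flush().Index("twitter").WaitIfOngoing(true).Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.Shards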
+ +package elastic + +import ( + "testing" +) + +func TestFlush(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Flush all indices + res, err := client.Flush().Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Errorf("expected res to be != nil; got: %v", res) + } +} + +func TestFlushBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Expected string + ExpectValidateFailure bool + }{ + { + []string{}, + "/_flush", + false, + }, + { + []string{"index1"}, + "/index1/_flush", + false, + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_flush", + false, + }, + } + + for i, test := range tests { + err := NewIndicesFlushService(client).Index(test.Indices...).Validate() + if err == nil && test.ExpectValidateFailure { + t.Errorf("case #%d: expected validate to fail", i+1) + continue + } + if err != nil && !test.ExpectValidateFailure { + t.Errorf("case #%d: expected validate to succeed", i+1) + continue + } + if !test.ExpectValidateFailure { + path, _, err := NewIndicesFlushService(client).Index(test.Indices...).buildURL() + if err != nil { + t.Fatalf("case #%d: %v", i+1, err) + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_forcemerge.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_forcemerge.go new file mode 100644 index 000000000..6ca7b5b8f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_forcemerge.go @@ -0,0 +1,200 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesForcemergeService allows to force merging of one or more indices. +// The merge relates to the number of segments a Lucene index holds +// within each shard. The force merge operation allows to reduce the number +// of segments by merging them. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/2.1/indices-forcemerge.html +// for more information. +type IndicesForcemergeService struct { + client *Client + pretty bool + index []string + allowNoIndices *bool + expandWildcards string + flush *bool + ignoreUnavailable *bool + maxNumSegments interface{} + onlyExpungeDeletes *bool + operationThreading interface{} + waitForMerge *bool +} + +// NewIndicesForcemergeService creates a new IndicesForcemergeService. +func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService { + return &IndicesForcemergeService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). 
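+//
+// Illustrative sketch (index pattern is the caller's own): with a wildcard
+// that matches no index, this flag decides between an empty success and an
+// error response.
+//
+//    // POST /old-logs-*/_forcemerge?allow_no_indices=true&max_num_segments=1
+//    _, err := client.Forcemerge("old-logs-*").
+//        AllowNoIndices(true).
+//        MaxNumSegments(1).
+//        Do()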
+func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService { + s.expandWildcards = expandWildcards + return s +} + +// Flush specifies whether the index should be flushed after performing +// the operation (default: true). +func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService { + s.flush = &flush + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// MaxNumSegments specifies the number of segments the index should be +// merged into (default: dynamic). +func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService { + s.maxNumSegments = maxNumSegments + return s +} + +// OnlyExpungeDeletes specifies whether the operation should only expunge +// deleted documents. +func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService { + s.onlyExpungeDeletes = &onlyExpungeDeletes + return s +} + +func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService { + s.operationThreading = operationThreading + return s +} + +// WaitForMerge specifies whether the request should block until the +// merge process is finished (default: true). +func (s *IndicesForcemergeService) WaitForMerge(waitForMerge bool) *IndicesForcemergeService { + s.waitForMerge = &waitForMerge + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_forcemerge" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flush != nil { + params.Set("flush", fmt.Sprintf("%v", *s.flush)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.maxNumSegments != nil { + params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments)) + } + if s.onlyExpungeDeletes != nil { + params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) + } + if s.operationThreading != nil { + params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading)) + } + if s.waitForMerge != nil { + params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
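+// For this service there are no required fields, so Validate always
+// returns nil and Do proceeds straight to buildURL and a POST. In outline
+// (mirroring the code below, not an alternative API):
+//
+//    // path, params, _ := s.buildURL()  // e.g. "/index1/_forcemerge"
+//    // res, _ := s.client.PerformRequest("POST", path, params, nil)
+//    // json.Unmarshal(res.Body, ret)    // -> IndicesForcemergeResponse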
+func (s *IndicesForcemergeService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesForcemergeService) Do() (*IndicesForcemergeResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesForcemergeResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do. +type IndicesForcemergeResponse struct { + Shards shardsInfo `json:"_shards"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_forcemerge_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_forcemerge_test.go new file mode 100644 index 000000000..c620654cc --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_forcemerge_test.go @@ -0,0 +1,56 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestIndicesForcemergeBuildURL(t *testing.T) { + client := setupTestClient(t) + + tests := []struct { + Indices []string + Expected string + }{ + { + []string{}, + "/_forcemerge", + }, + { + []string{"index1"}, + "/index1/_forcemerge", + }, + { + []string{"index1", "index2"}, + "/index1%2Cindex2/_forcemerge", + }, + } + + for i, test := range tests { + path, _, err := client.Forcemerge().Index(test.Indices...).buildURL() + if err != nil { + t.Errorf("case #%d: %v", i+1, err) + continue + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestIndicesForcemerge(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + _, err := client.Forcemerge(testIndexName).MaxNumSegments(1).WaitForMerge(true).Do() + if err != nil { + t.Fatal(err) + } + /* + if !ok { + t.Fatalf("expected forcemerge to succeed; got: %v", ok) + } + */ +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get.go new file mode 100644 index 000000000..355184394 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get.go @@ -0,0 +1,202 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetService retrieves information about one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html +// for more details. +type IndicesGetService struct { + client *Client + pretty bool + index []string + feature []string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + flatSettings *bool + human *bool +} + +// NewIndicesGetService creates a new IndicesGetService. 
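+//
+// A short usage sketch via the client helper (index and feature names are
+// illustrative; error handling elided):
+//
+//    res, err := client.IndexGet().
+//        Index("twitter").
+//        Feature("_settings", "_mappings").
+//        Do()
+//    info := res["twitter"] // Aliases, Mappings, Settings, Warmers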
+func NewIndicesGetService(client *Client) *IndicesGetService { + return &IndicesGetService{ + client: client, + index: make([]string, 0), + feature: make([]string, 0), + } +} + +// Index is a list of index names. +func (s *IndicesGetService) Index(indices ...string) *IndicesGetService { + s.index = append(s.index, indices...) + return s +} + +// Feature is a list of features. +func (s *IndicesGetService) Feature(features ...string) *IndicesGetService { + s.feature = append(s.feature, features...) + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesGetService) Local(local bool) *IndicesGetService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false). +func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard expression +// resolves to no concrete indices (default: false). +func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether wildcard expressions should get +// expanded to open or closed indices (default: open). +func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService { + s.expandWildcards = expandWildcards + return s +} + +/* Disabled because serialization would fail in that case. */ +/* +// FlatSettings make the service return settings in flat format (default: false). +func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService { + s.flatSettings = &flatSettings + return s +} +*/ + +// Human indicates whether to return version and creation date values +// in human-readable format (default: false). +func (s *IndicesGetService) Human(human bool) *IndicesGetService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
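+// Expected path shapes, matching the unit tests further below:
+//
+//    // no index, no feature    -> "/_all"
+//    // no index, one feature   -> "/_all/_mappings"
+//    // one index, two features -> "/twitter/_mappings%2C_settings"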
+func (s *IndicesGetService) buildURL() (string, url.Values, error) { + var err error + var path string + var index []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.feature) > 0 { + // Build URL + path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{ + "index": strings.Join(index, ","), + "feature": strings.Join(s.feature, ","), + }) + } else { + // Build URL + path, err = uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetResponse + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetResponse is part of the response of IndicesGetService.Do. +type IndicesGetResponse struct { + Aliases map[string]interface{} `json:"aliases"` + Mappings map[string]interface{} `json:"mappings"` + Settings map[string]interface{} `json:"settings"` + Warmers map[string]interface{} `json:"warmers"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_aliases.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_aliases.go new file mode 100644 index 000000000..4de88c63d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_aliases.go @@ -0,0 +1,155 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
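+//
+// This file implements the read side of the alias API (GET .../_aliases).
+// A brief sketch of typical use (the alias name is illustrative):
+//
+//    res, err := client.Aliases().Index("twitter").Do()
+//    if err == nil {
+//        indices := res.IndicesByAlias("my-alias")
+//        _ = indices // index names behind "my-alias"
+//    }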
+ +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +type AliasesService struct { + client *Client + indices []string + pretty bool +} + +func NewAliasesService(client *Client) *AliasesService { + builder := &AliasesService{ + client: client, + indices: make([]string, 0), + } + return builder +} + +func (s *AliasesService) Pretty(pretty bool) *AliasesService { + s.pretty = pretty + return s +} + +func (s *AliasesService) Index(indices ...string) *AliasesService { + s.indices = append(s.indices, indices...) + return s +} + +func (s *AliasesService) Do() (*AliasesResult, error) { + var err error + + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err = uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + path += strings.Join(indexPart, ",") + + // TODO Add types here + + // Search + path += "/_aliases" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Get response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // { + // "indexName" : { + // "aliases" : { + // "alias1" : { }, + // "alias2" : { } + // } + // }, + // "indexName2" : { + // ... + // }, + // } + indexMap := make(map[string]interface{}) + if err := json.Unmarshal(res.Body, &indexMap); err != nil { + return nil, err + } + + // Each (indexName, _) + ret := &AliasesResult{ + Indices: make(map[string]indexResult), + } + for indexName, indexData := range indexMap { + indexOut, found := ret.Indices[indexName] + if !found { + indexOut = indexResult{Aliases: make([]aliasResult, 0)} + } + + // { "aliases" : { ... } } + indexDataMap, ok := indexData.(map[string]interface{}) + if ok { + aliasesData, ok := indexDataMap["aliases"].(map[string]interface{}) + if ok { + for aliasName, _ := range aliasesData { + aliasRes := aliasResult{AliasName: aliasName} + indexOut.Aliases = append(indexOut.Aliases, aliasRes) + } + } + } + + ret.Indices[indexName] = indexOut + } + + return ret, nil +} + +// -- Result of an alias request. + +type AliasesResult struct { + Indices map[string]indexResult +} + +type indexResult struct { + Aliases []aliasResult +} + +type aliasResult struct { + AliasName string +} + +func (ar AliasesResult) IndicesByAlias(aliasName string) []string { + indices := make([]string, 0) + + for indexName, indexInfo := range ar.Indices { + for _, aliasInfo := range indexInfo.Aliases { + if aliasInfo.AliasName == aliasName { + indices = append(indices, indexName) + } + } + } + + return indices +} + +func (ir indexResult) HasAlias(aliasName string) bool { + for _, alias := range ir.Aliases { + if alias.AliasName == aliasName { + return true + } + } + return false +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_aliases_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_aliases_test.go new file mode 100644 index 000000000..6094f426e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_aliases_test.go @@ -0,0 +1,146 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestAliases(t *testing.T) { + var err error + + client := setupTestClientAndCreateIndex(t) + + // Some tweets + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."} + tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."} + + // Add tweets to first index + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + // Add tweets to second index + _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + + // Alias should not yet exist + aliasesResult1, err := client.Aliases(). + Index(testIndexName, testIndexName2). + //Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if len(aliasesResult1.Indices) != 2 { + t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices)) + } + for indexName, indexDetails := range aliasesResult1.Indices { + if len(indexDetails.Aliases) != 0 { + t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases)) + } + } + + // Add both indices to a new alias + aliasCreate, err := client.Alias(). + Add(testIndexName, testAliasName). + Add(testIndexName2, testAliasName). + //Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if !aliasCreate.Acknowledged { + t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged) + } + + // Alias should now exist + aliasesResult2, err := client.Aliases(). + Index(testIndexName, testIndexName2). + //Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if len(aliasesResult2.Indices) != 2 { + t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices)) + } + for indexName, indexDetails := range aliasesResult2.Indices { + if len(indexDetails.Aliases) != 1 { + t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases)) + } + } + + // Check the reverse function: + indexInfo1, found := aliasesResult2.Indices[testIndexName] + if !found { + t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found) + } + aliasFound := indexInfo1.HasAlias(testAliasName) + if !aliasFound { + t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound) + } + + // Check the reverse function: + indexInfo2, found := aliasesResult2.Indices[testIndexName2] + if !found { + t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found) + } + aliasFound = indexInfo2.HasAlias(testAliasName) + if !aliasFound { + t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound) + } + + // Remove first index should remove two tweets, so should only yield 1 + aliasRemove1, err := client.Alias(). + Remove(testIndexName, testAliasName). + //Pretty(true). 
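+		// On the wire this Remove becomes a single _aliases action,
+		// roughly: {"actions":[{"remove":{"index":...,"alias":...}}]}
+		// (see AliasService.Do in indices_put_alias.go for the builder)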
+ Do() + if err != nil { + t.Fatal(err) + } + if !aliasRemove1.Acknowledged { + t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged) + } + + // Alias should now exist only for index 2 + aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if len(aliasesResult3.Indices) != 2 { + t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices)) + } + for indexName, indexDetails := range aliasesResult3.Indices { + if indexName == testIndexName { + if len(indexDetails.Aliases) != 0 { + t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases)) + } + } else if indexName == testIndexName2 { + if len(indexDetails.Aliases) != 1 { + t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases)) + } + } else { + t.Errorf("got index %s", indexName) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_mapping.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_mapping.go new file mode 100644 index 000000000..5526cfcb8 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_mapping.go @@ -0,0 +1,170 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetMappingService retrieves the mapping definitions for an index or +// index/type. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html +// for details. +type IndicesGetMappingService struct { + client *Client + pretty bool + index []string + typ []string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewGetMappingService is an alias for NewIndicesGetMappingService. +// Use NewIndicesGetMappingService. +func NewGetMappingService(client *Client) *IndicesGetMappingService { + return NewIndicesGetMappingService(client) +} + +// NewIndicesGetMappingService creates a new IndicesGetMappingService. +func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService { + return &IndicesGetMappingService{ + client: client, + index: make([]string, 0), + typ: make([]string, 0), + } +} + +// Index is a list of index names. +func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types. +func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService { + s.typ = append(s.typ, types...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. +func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. 
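+//
+// A minimal, illustrative call (index and type names are the caller's own):
+//
+//    // GET /twitter/_mapping/tweet
+//    mappings, err := client.GetMapping().Index("twitter").Type("tweet").Do()
+//    // mappings is a map keyed by index name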
+func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService { + s.expandWildcards = expandWildcards + return s +} + +// Local indicates whether to return local information, do not retrieve +// the state from master node (default: false). +func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) { + var index, typ []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.typ) > 0 { + typ = s.typ + } else { + typ = []string{"_all"} + } + + // Build URL + path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ + "index": strings.Join(index, ","), + "type": strings.Join(typ, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetMappingService) Validate() error { + return nil +} + +// Do executes the operation. It returns mapping definitions for an index +// or index/type. +func (s *IndicesGetMappingService) Do() (map[string]interface{}, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]interface{} + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_mapping_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_mapping_test.go new file mode 100644 index 000000000..ccfa27fed --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_mapping_test.go @@ -0,0 +1,50 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestIndicesGetMappingURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Expected string + }{ + { + []string{}, + []string{}, + "/_all/_mapping/_all", + }, + { + []string{}, + []string{"tweet"}, + "/_all/_mapping/tweet", + }, + { + []string{"twitter"}, + []string{"tweet"}, + "/twitter/_mapping/tweet", + }, + { + []string{"store-1", "store-2"}, + []string{"tweet", "user"}, + "/store-1%2Cstore-2/_mapping/tweet%2Cuser", + }, + } + + for _, test := range tests { + path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_settings.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_settings.go new file mode 100644 index 000000000..4820cb656 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_settings.go @@ -0,0 +1,183 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetSettingsService allows to retrieve settings of one +// or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html +// for more details. +type IndicesGetSettingsService struct { + client *Client + pretty bool + index []string + name []string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + flatSettings *bool + local *bool +} + +// NewIndicesGetSettingsService creates a new IndicesGetSettingsService. +func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService { + return &IndicesGetSettingsService{ + client: client, + index: make([]string, 0), + name: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService { + s.index = append(s.index, indices...) + return s +} + +// Name are the names of the settings that should be included. +func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService { + s.name = append(s.name, name...) + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression +// to concrete indices that are open, closed or both. +// Options: open, closed, none, all. Default: open,closed. 
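+//
+// Sketch of a filtered lookup (the name filter accepts wildcards, as the
+// tests below demonstrate):
+//
+//    // GET /twitter/_settings/index.merge.%2A
+//    res, err := client.IndexGetSettings().
+//        Index("twitter").
+//        Name("index.merge.*").
+//        Do()
+//    // res["twitter"].Settings holds the matching settings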
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService { + s.expandWildcards = expandWildcards + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService { + s.flatSettings = &flatSettings + return s +} + +// Local indicates whether to return local information, do not retrieve +// the state from master node (default: false). +func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) { + var err error + var path string + var index []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.name) > 0 { + // Build URL + path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{ + "index": strings.Join(index, ","), + "name": strings.Join(s.name, ","), + }) + } else { + // Build URL + path, err = uritemplates.Expand("/{index}/_settings", map[string]string{ + "index": strings.Join(index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetSettingsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetSettingsResponse + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do. +type IndicesGetSettingsResponse struct { + Settings map[string]interface{} `json:"settings"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_settings_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_settings_test.go new file mode 100644 index 000000000..f53512d53 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_settings_test.go @@ -0,0 +1,81 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestIndexGetSettingsURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Names []string + Expected string + }{ + { + []string{}, + []string{}, + "/_all/_settings", + }, + { + []string{}, + []string{"index.merge.*"}, + "/_all/_settings/index.merge.%2A", + }, + { + []string{"twitter-*"}, + []string{"index.merge.*", "_settings"}, + "/twitter-%2A/_settings/index.merge.%2A%2C_settings", + }, + { + []string{"store-1", "store-2"}, + []string{"index.merge.*", "_settings"}, + "/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings", + }, + } + + for _, test := range tests { + path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestIndexGetSettingsService(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.4.0" { + t.Skip("Index Get API is available since 1.4") + return + } + + res, err := client.IndexGetSettings().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected result; got: %v", res) + } + info, found := res[testIndexName] + if !found { + t.Fatalf("expected index %q to be found; got: %v", testIndexName, found) + } + if info == nil { + t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info) + } + if info.Settings == nil { + t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_template.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_template.go new file mode 100644 index 000000000..b0e66d3f9 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_template.go @@ -0,0 +1,128 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetTemplateService returns an index template. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. +type IndicesGetTemplateService struct { + client *Client + pretty bool + name []string + flatSettings *bool + local *bool +} + +// NewIndicesGetTemplateService creates a new IndicesGetTemplateService. +func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService { + return &IndicesGetTemplateService{ + client: client, + name: make([]string, 0), + } +} + +// Name is the name of the index template. +func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService { + s.name = append(s.name, name...) + return s +} + +// FlatSettings is returns settings in flat format (default: false). +func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService { + s.flatSettings = &flatSettings + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). 
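+//
+// Illustrative use (the template name is hypothetical; several names may
+// be passed and are joined with commas):
+//
+//    // GET /_template/my-template
+//    res, err := client.IndexGetTemplate().Name("my-template").Do()
+//    // res["my-template"].Template is the pattern the template applies to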
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.name) > 0 { + path, err = uritemplates.Expand("/_template/{name}", map[string]string{ + "name": strings.Join(s.name, ","), + }) + } else { + path = "/_template" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetTemplateService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetTemplateResponse + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do. +type IndicesGetTemplateResponse struct { + Order int `json:"order,omitempty"` + Template string `json:"template,omitempty"` + Settings map[string]interface{} `json:"settings,omitempty"` + Mappings map[string]interface{} `json:"mappings,omitempty"` + Aliases map[string]interface{} `json:"aliases,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_template_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_template_test.go new file mode 100644 index 000000000..693cde5ea --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_template_test.go @@ -0,0 +1,41 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestIndexGetTemplateURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Names []string + Expected string + }{ + { + []string{}, + "/_template", + }, + { + []string{"index1"}, + "/_template/index1", + }, + { + []string{"index1", "index2"}, + "/_template/index1%2Cindex2", + }, + } + + for _, test := range tests { + path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_test.go new file mode 100644 index 000000000..fcdee54db --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_test.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +func TestIndicesGetValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesGetService(client).Index("").Do() + if err == nil { + t.Fatalf("expected IndicesGet to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} + +func TestIndicesGetURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Features []string + Expected string + }{ + { + []string{}, + []string{}, + "/_all", + }, + { + []string{}, + []string{"_mappings"}, + "/_all/_mappings", + }, + { + []string{"twitter"}, + []string{"_mappings", "_settings"}, + "/twitter/_mappings%2C_settings", + }, + { + []string{"store-1", "store-2"}, + []string{"_mappings", "_settings"}, + "/store-1%2Cstore-2/_mappings%2C_settings", + }, + } + + for _, test := range tests { + path, _, err := NewIndicesGetService(client).Index(test.Indices...).Feature(test.Features...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestIndicesGetService(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.4.0" { + t.Skip("Index Get API is available since 1.4") + return + } + + res, err := client.IndexGet().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected result; got: %v", res) + } + info, found := res[testIndexName] + if !found { + t.Fatalf("expected index %q to be found; got: %v", testIndexName, found) + } + if info == nil { + t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info) + } + if info.Mappings == nil { + t.Errorf("expected mappings to be != nil; got: %v", info.Mappings) + } + if info.Settings == nil { + t.Errorf("expected settings to be != nil; got: %v", info.Settings) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_warmer.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_warmer.go new file mode 100644 index 000000000..29bc6cbfd --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_warmer.go @@ -0,0 +1,194 @@ +// Copyright 
2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesGetWarmerService allows to get the definition of a warmer for a +// specific index (or alias, or several indices) based on its name. +// The provided name can be a simple wildcard expression or omitted to get +// all warmers. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html +// for more information. +type IndicesGetWarmerService struct { + client *Client + pretty bool + index []string + name []string + typ []string + allowNoIndices *bool + expandWildcards string + ignoreUnavailable *bool + local *bool +} + +// NewIndicesGetWarmerService creates a new IndicesGetWarmerService. +func NewIndicesGetWarmerService(client *Client) *IndicesGetWarmerService { + return &IndicesGetWarmerService{ + client: client, + typ: make([]string, 0), + index: make([]string, 0), + name: make([]string, 0), + } +} + +// Index is a list of index names to restrict the operation; use `_all` to perform the operation on all indices. +func (s *IndicesGetWarmerService) Index(indices ...string) *IndicesGetWarmerService { + s.index = append(s.index, indices...) + return s +} + +// Name is the name of the warmer (supports wildcards); leave empty to get all warmers. +func (s *IndicesGetWarmerService) Name(name ...string) *IndicesGetWarmerService { + s.name = append(s.name, name...) + return s +} + +// Type is a list of type names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all types. +func (s *IndicesGetWarmerService) Type(typ ...string) *IndicesGetWarmerService { + s.typ = append(s.typ, typ...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. +func (s *IndicesGetWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesGetWarmerService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesGetWarmerService) ExpandWildcards(expandWildcards string) *IndicesGetWarmerService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesGetWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetWarmerService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Local indicates wether or not to return local information, +// do not retrieve the state from master node (default: false). +func (s *IndicesGetWarmerService) Local(local bool) *IndicesGetWarmerService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetWarmerService) Pretty(pretty bool) *IndicesGetWarmerService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
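+// The path depends on which of index, type and name are set; the branches
+// below cover all eight combinations, for example:
+//
+//    // nothing set          -> "/_warmer"
+//    // name only            -> "/_warmer/warmer_1"
+//    // type only            -> "/_all/tweet/_warmer"
+//    // index, type and name -> "/test/tweet/_warmer/warmer_1"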
+func (s *IndicesGetWarmerService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) == 0 { + path = "/_warmer" + } else if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) > 0 { + path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ + "name": strings.Join(s.name, ","), + }) + } else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) == 0 { + path, err = uritemplates.Expand("/_all/{type}/_warmer", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ + "type": strings.Join(s.typ, ","), + "name": strings.Join(s.name, ","), + }) + } else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) == 0 { + path, err = uritemplates.Expand("/{index}/_warmer", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) > 0 { + path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "name": strings.Join(s.name, ","), + }) + } else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) == 0 { + path, err = uritemplates.Expand("/{index}/{type}/_warmer", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + "name": strings.Join(s.name, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetWarmerService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesGetWarmerService) Do() (map[string]interface{}, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]interface{} + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_warmer_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_warmer_test.go new file mode 100644 index 000000000..ea01a628e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_get_warmer_test.go @@ -0,0 +1,83 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import "testing" + +func TestGetWarmerBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Names []string + Expected string + }{ + { + []string{}, + []string{}, + []string{}, + "/_warmer", + }, + { + []string{}, + []string{}, + []string{"warmer_1"}, + "/_warmer/warmer_1", + }, + { + []string{}, + []string{"tweet"}, + []string{}, + "/_all/tweet/_warmer", + }, + { + []string{}, + []string{"tweet"}, + []string{"warmer_1"}, + "/_all/tweet/_warmer/warmer_1", + }, + { + []string{"test"}, + []string{}, + []string{}, + "/test/_warmer", + }, + { + []string{"test"}, + []string{}, + []string{"warmer_1"}, + "/test/_warmer/warmer_1", + }, + { + []string{"*"}, + []string{}, + []string{"warmer_1"}, + "/%2A/_warmer/warmer_1", + }, + { + []string{"test"}, + []string{"tweet"}, + []string{"warmer_1"}, + "/test/tweet/_warmer/warmer_1", + }, + { + []string{"index-1", "index-2"}, + []string{"type-1", "type-2"}, + []string{"warmer_1", "warmer_2"}, + "/index-1%2Cindex-2/type-1%2Ctype-2/_warmer/warmer_1%2Cwarmer_2", + }, + } + + for _, test := range tests { + path, _, err := client.GetWarmer().Index(test.Indices...).Type(test.Types...).Name(test.Names...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_open.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_open.go new file mode 100644 index 000000000..85a45bb1d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_open.go @@ -0,0 +1,157 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesOpenService opens an index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html +// for details. +type IndicesOpenService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesOpenService creates and initializes a new IndicesOpenService. +func NewIndicesOpenService(client *Client) *IndicesOpenService { + return &IndicesOpenService{client: client} +} + +// Index is the name of the index to open. +func (s *IndicesOpenService) Index(index string) *IndicesOpenService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). 
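+//
+// Typical use is a single call per index (sketch; index name illustrative,
+// error handling elided):
+//
+//    // POST /twitter/_open
+//    res, err := NewIndicesOpenService(client).Index("twitter").Do()
+//    if err == nil && res.Acknowledged {
+//        // the cluster accepted the open request
+//    }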
+func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesOpenService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_open", map[string]string{ + "index": s.index, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesOpenService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesOpenService) Do() (*IndicesOpenResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesOpenResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesOpenResponse is the response of IndicesOpenService.Do. +type IndicesOpenResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_open_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_open_test.go new file mode 100644 index 000000000..352bb479b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_open_test.go @@ -0,0 +1,20 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import "testing" + +func TestIndicesOpenValidate(t *testing.T) { + client := setupTestClient(t) + + // No index name -> fail with error + res, err := NewIndicesOpenService(client).Do() + if err == nil { + t.Fatalf("expected IndicesOpen to fail without index name") + } + if res != nil { + t.Fatalf("expected result to be == nil; got: %v", res) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_alias.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_alias.go new file mode 100644 index 000000000..d8515036b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_alias.go @@ -0,0 +1,111 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" +) + +type AliasService struct { + client *Client + actions []aliasAction + pretty bool +} + +type aliasAction struct { + // "add" or "remove" + Type string + // Index name + Index string + // Alias name + Alias string + // Filter + Filter Query +} + +func NewAliasService(client *Client) *AliasService { + builder := &AliasService{ + client: client, + actions: make([]aliasAction, 0), + } + return builder +} + +func (s *AliasService) Pretty(pretty bool) *AliasService { + s.pretty = pretty + return s +} + +func (s *AliasService) Add(indexName string, aliasName string) *AliasService { + action := aliasAction{Type: "add", Index: indexName, Alias: aliasName} + s.actions = append(s.actions, action) + return s +} + +func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService { + action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter} + s.actions = append(s.actions, action) + return s +} + +func (s *AliasService) Remove(indexName string, aliasName string) *AliasService { + action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName} + s.actions = append(s.actions, action) + return s +} + +func (s *AliasService) Do() (*AliasResult, error) { + // Build url + path := "/_aliases" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Actions + body := make(map[string]interface{}) + actionsJson := make([]interface{}, 0) + + for _, action := range s.actions { + actionJson := make(map[string]interface{}) + detailsJson := make(map[string]interface{}) + detailsJson["index"] = action.Index + detailsJson["alias"] = action.Alias + if action.Filter != nil { + src, err := action.Filter.Source() + if err != nil { + return nil, err + } + detailsJson["filter"] = src + } + actionJson[action.Type] = detailsJson + actionsJson = append(actionsJson, actionJson) + } + + body["actions"] = actionsJson + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(AliasResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of an alias request. 
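+//
+// Editor's note: a sketch, not upstream code, of the atomic alias swap this
+// service enables -- both actions are applied in a single _aliases call. The
+// index and alias names are hypothetical:
+//
+//    res, err := client.Alias().
+//        Remove("logs-v1", "logs").
+//        Add("logs-v2", "logs").
+//        Do()
+//    if err == nil && res.Acknowledged {
+//        // "logs" now points at logs-v2 only
+//    }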
+ +type AliasResult struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_alias_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_alias_test.go new file mode 100644 index 000000000..3e4e797b0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_alias_test.go @@ -0,0 +1,123 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" +) + +const ( + testAliasName = "elastic-test-alias" +) + +func TestAliasLifecycle(t *testing.T) { + var err error + + client := setupTestClientAndCreateIndex(t) + + // Some tweets + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."} + tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."} + + // Add tweets to first index + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + // Add tweets to second index + _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + + /* + // Alias should not yet exist + aliasesResult1, err := client.Aliases().Do() + if err != nil { + t.Fatal(err) + } + if len(aliasesResult1.Indices) != 0 { + t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 0, len(aliasesResult1.Indices)) + } + */ + + // Add both indices to a new alias + aliasCreate, err := client.Alias(). + Add(testIndexName, testAliasName). + Add(testIndexName2, testAliasName). + //Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if !aliasCreate.Acknowledged { + t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged) + } + + // Search should return all 3 tweets + matchAll := NewMatchAllQuery() + searchResult1, err := client.Search().Index(testAliasName).Query(matchAll).Do() + if err != nil { + t.Fatal(err) + } + if searchResult1.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult1.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits) + } + + /* + // Alias should return both indices + aliasesResult2, err := client.Aliases().Do() + if err != nil { + t.Fatal(err) + } + if len(aliasesResult2.Indices) != 2 { + t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices)) + } + */ + + // Remove first index should remove two tweets, so should only yield 1 + aliasRemove1, err := client.Alias(). + Remove(testIndexName, testAliasName). + //Pretty(true). 
+ Do() + if err != nil { + t.Fatal(err) + } + if !aliasRemove1.Acknowledged { + t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged) + } + + searchResult2, err := client.Search().Index(testAliasName).Query(matchAll).Do() + if err != nil { + t.Fatal(err) + } + if searchResult2.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult2.Hits.TotalHits != 1 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits) + } + +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_mapping.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_mapping.go new file mode 100644 index 000000000..5a23165b0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_mapping.go @@ -0,0 +1,221 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesPutMappingService allows to register specific mapping definition +// for a specific type. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html +// for details. +type IndicesPutMappingService struct { + client *Client + pretty bool + typ string + index []string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + ignoreConflicts *bool + timeout string + bodyJson map[string]interface{} + bodyString string +} + +// NewPutMappingService is an alias for NewIndicesPutMappingService. +// Use NewIndicesPutMappingService. +func NewPutMappingService(client *Client) *IndicesPutMappingService { + return NewIndicesPutMappingService(client) +} + +// NewIndicesPutMappingService creates a new IndicesPutMappingService. +func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService { + return &IndicesPutMappingService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService { + s.index = append(s.index, indices...) + return s +} + +// Type is the name of the document type. +func (s *IndicesPutMappingService) Type(typ string) *IndicesPutMappingService { + s.typ = typ + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. 
+func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreConflicts specifies whether to ignore conflicts while updating +// the mapping (default: false). +func (s *IndicesPutMappingService) IgnoreConflicts(ignoreConflicts bool) *IndicesPutMappingService { + s.ignoreConflicts = &ignoreConflicts + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService { + s.pretty = pretty + return s +} + +// BodyJson contains the mapping definition. +func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService { + s.bodyJson = mapping + return s +} + +// BodyString is the mapping definition serialized as a string. +func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService { + s.bodyString = mapping + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL: Typ MUST be specified and is verified in Validate. + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": s.typ, + }) + } else { + path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{ + "type": s.typ, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreConflicts != nil { + params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutMappingService) Validate() error { + var invalid []string + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
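+//
+// A usage sketch (editor's note, not upstream documentation); the index,
+// type, and field names are hypothetical:
+//
+//    mapping := `{"tweet":{"properties":{"message":{"type":"string"}}}}`
+//    resp, err := client.PutMapping().
+//        Index("twitter").
+//        Type("tweet").
+//        BodyString(mapping).
+//        Do()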
+func (s *IndicesPutMappingService) Do() (*PutMappingResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PutMappingResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PutMappingResponse is the response of IndicesPutMappingService.Do. +type PutMappingResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_mapping_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_mapping_test.go new file mode 100644 index 000000000..356aa2728 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_mapping_test.go @@ -0,0 +1,82 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestPutMappingURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Type string + Expected string + }{ + { + []string{}, + "tweet", + "/_mapping/tweet", + }, + { + []string{"*"}, + "tweet", + "/%2A/_mapping/tweet", + }, + { + []string{"store-1", "store-2"}, + "tweet", + "/store-1%2Cstore-2/_mapping/tweet", + }, + } + + for _, test := range tests { + path, _, err := client.PutMapping().Index(test.Indices...).Type(test.Type).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestMappingLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + mapping := `{ + "tweetdoc":{ + "properties":{ + "message":{ + "type":"string" + } + } + } + }` + + putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do() + if err != nil { + t.Fatalf("expected put mapping to succeed; got: %v", err) + } + if putresp == nil { + t.Fatalf("expected put mapping response; got: %v", putresp) + } + if !putresp.Acknowledged { + t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged) + } + + getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do() + if err != nil { + t.Fatalf("expected get mapping to succeed; got: %v", err) + } + if getresp == nil { + t.Fatalf("expected get mapping response; got: %v", getresp) + } + props, ok := getresp[testIndexName2] + if !ok { + t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props) + } + + // NOTE There is no Delete Mapping API in Elasticsearch 2.0 +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_settings.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_settings.go new file mode 100644 index 000000000..4cdd3e1cb --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_settings.go @@ -0,0 +1,184 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesPutSettingsService changes specific index level settings in +// real time. +// +// See the documentation at +// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html. +type IndicesPutSettingsService struct { + client *Client + pretty bool + index []string + allowNoIndices *bool + expandWildcards string + flatSettings *bool + ignoreUnavailable *bool + masterTimeout string + bodyJson interface{} + bodyString string +} + +// NewIndicesPutSettingsService creates a new IndicesPutSettingsService. +func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService { + return &IndicesPutSettingsService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService { + s.index = append(s.index, indices...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` +// string or when no indices have been specified). +func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards specifies whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService { + s.expandWildcards = expandWildcards + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService { + s.flatSettings = &flatSettings + return s +} + +// IgnoreUnavailable specifies whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// MasterTimeout is the timeout for connection to master. +func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: The index settings to be updated. +func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService { + s.bodyJson = body + return s +} + +// BodyString is documented as: The index settings to be updated. +func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_settings", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_settings" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutSettingsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesPutSettingsService) Do() (*IndicesPutSettingsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesPutSettingsResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do. +type IndicesPutSettingsResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_settings_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_settings_test.go new file mode 100644 index 000000000..4bc86e18e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_settings_test.go @@ -0,0 +1,92 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
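+
+// Editor's note: a sketch, not upstream code, of updating a dynamic index
+// setting with the IndicesPutSettingsService defined in
+// indices_put_settings.go. The index name is hypothetical:
+//
+//    body := `{"index":{"number_of_replicas":1}}`
+//    res, err := client.IndexPutSettings().Index("twitter").BodyString(body).Do()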
+ +package elastic + +import "testing" + +func TestIndicesPutSettingsBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Expected string + }{ + { + []string{}, + "/_settings", + }, + { + []string{"*"}, + "/%2A/_settings", + }, + { + []string{"store-1", "store-2"}, + "/store-1%2Cstore-2/_settings", + }, + } + + for _, test := range tests { + path, _, err := client.IndexPutSettings().Index(test.Indices...).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestIndicesSettingsLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + body := `{ + "index":{ + "refresh_interval":"-1" + } + }` + + // Put settings + putres, err := client.IndexPutSettings().Index(testIndexName).BodyString(body).Do() + if err != nil { + t.Fatalf("expected put settings to succeed; got: %v", err) + } + if putres == nil { + t.Fatalf("expected put settings response; got: %v", putres) + } + if !putres.Acknowledged { + t.Fatalf("expected put settings ack; got: %v", putres.Acknowledged) + } + + // Read settings + getres, err := client.IndexGetSettings().Index(testIndexName).Do() + if err != nil { + t.Fatalf("expected get mapping to succeed; got: %v", err) + } + if getres == nil { + t.Fatalf("expected get mapping response; got: %v", getres) + } + + // Check settings + index, found := getres[testIndexName] + if !found { + t.Fatalf("expected to return settings for index %q; got: %#v", testIndexName, getres) + } + // Retrieve "index" section of the settings for index testIndexName + sectionIntf, ok := index.Settings["index"] + if !ok { + t.Fatalf("expected settings to have %q field; got: %#v", "index", getres) + } + section, ok := sectionIntf.(map[string]interface{}) + if !ok { + t.Fatalf("expected settings to be of type map[string]interface{}; got: %#v", getres) + } + refintv, ok := section["refresh_interval"] + if !ok { + t.Fatalf(`expected JSON to include "refresh_interval" field; got: %#v`, getres) + } + if got, want := refintv, "-1"; got != want { + t.Fatalf("expected refresh_interval = %v; got: %v", want, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_template.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_template.go new file mode 100644 index 000000000..72947f311 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_template.go @@ -0,0 +1,179 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesPutTemplateService creates or updates index mappings. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. +type IndicesPutTemplateService struct { + client *Client + pretty bool + name string + order interface{} + create *bool + timeout string + masterTimeout string + flatSettings *bool + bodyJson interface{} + bodyString string +} + +// NewIndicesPutTemplateService creates a new IndicesPutTemplateService. +func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService { + return &IndicesPutTemplateService{ + client: client, + } +} + +// Name is the name of the index template. 
+func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService { + s.flatSettings = &flatSettings + return s +} + +// Order is the order for this template when merging multiple matching ones +// (higher numbers are merged later, overriding the lower numbers). +func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService { + s.order = order + return s +} + +// Create indicates whether the index template should only be added if +// new or can also replace an existing one. +func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService { + s.create = &create + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: The template definition. +func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService { + s.bodyJson = body + return s +} + +// BodyString is documented as: The template definition. +func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.order != nil { + params.Set("order", fmt.Sprintf("%v", s.order)) + } + if s.create != nil { + params.Set("create", fmt.Sprintf("%v", *s.create)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
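+//
+// A usage sketch (editor's note, not upstream documentation). The template
+// name is hypothetical and the body is assumed to follow the index-template
+// format of the targeted Elasticsearch version:
+//
+//    body := `{"template":"logs-*","settings":{"number_of_shards":1}}`
+//    resp, err := NewIndicesPutTemplateService(client).
+//        Name("logs-template").
+//        BodyString(body).
+//        Do()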
+func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesPutTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do. +type IndicesPutTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_warmer.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_warmer.go new file mode 100644 index 000000000..6e1f3ae66 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_warmer.go @@ -0,0 +1,222 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesPutWarmerService allows to register a warmer. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. +type IndicesPutWarmerService struct { + client *Client + pretty bool + typ []string + index []string + name string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + requestCache *bool + expandWildcards string + bodyJson map[string]interface{} + bodyString string +} + +// NewIndicesPutWarmerService creates a new IndicesPutWarmerService. +func NewIndicesPutWarmerService(client *Client) *IndicesPutWarmerService { + return &IndicesPutWarmerService{ + client: client, + index: make([]string, 0), + typ: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesPutWarmerService) Index(indices ...string) *IndicesPutWarmerService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of type names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all types. +func (s *IndicesPutWarmerService) Type(typ ...string) *IndicesPutWarmerService { + s.typ = append(s.typ, typ...) + return s +} + +// Name specifies the name of the warmer (supports wildcards); +// leave empty to get all warmers +func (s *IndicesPutWarmerService) Name(name string) *IndicesPutWarmerService { + s.name = name + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutWarmerService) MasterTimeout(masterTimeout string) *IndicesPutWarmerService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). 
+func (s *IndicesPutWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutWarmerService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. +func (s *IndicesPutWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesPutWarmerService { + s.allowNoIndices = &allowNoIndices + return s +} + +// RequestCache specifies whether the request to be warmed should use the request cache, +// defaults to index level setting +func (s *IndicesPutWarmerService) RequestCache(requestCache bool) *IndicesPutWarmerService { + s.requestCache = &requestCache + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutWarmerService) ExpandWildcards(expandWildcards string) *IndicesPutWarmerService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutWarmerService) Pretty(pretty bool) *IndicesPutWarmerService { + s.pretty = pretty + return s +} + +// BodyJson contains the mapping definition. +func (s *IndicesPutWarmerService) BodyJson(mapping map[string]interface{}) *IndicesPutWarmerService { + s.bodyJson = mapping + return s +} + +// BodyString is the mapping definition serialized as a string. +func (s *IndicesPutWarmerService) BodyString(mapping string) *IndicesPutWarmerService { + s.bodyString = mapping + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesPutWarmerService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) == 0 && len(s.typ) == 0 { + path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ + "name": s.name, + }) + } else if len(s.index) == 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ + "type": strings.Join(s.typ, ","), + "name": s.name, + }) + } else if len(s.index) > 0 && len(s.typ) == 0 { + path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "name": s.name, + }) + } else { + path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + "name": s.name, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
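+//
+// (Editor's note, placed here for reference while reading the validation
+// below: a sketch of registering a warmer with this service. The index,
+// type, and warmer names are hypothetical.)
+//
+//    body := `{"query":{"match_all":{}}}`
+//    resp, err := client.PutWarmer().
+//        Index("twitter").
+//        Type("tweet").
+//        Name("warmer_1").
+//        BodyString(body).
+//        Do()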
+func (s *IndicesPutWarmerService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesPutWarmerService) Do() (*PutWarmerResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PutWarmerResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PutWarmerResponse is the response of IndicesPutWarmerService.Do. +type PutWarmerResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_warmer_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_warmer_test.go new file mode 100644 index 000000000..25a1f3ecb --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_put_warmer_test.go @@ -0,0 +1,100 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestPutWarmerBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Types []string + Name string + Expected string + }{ + { + []string{}, + []string{}, + "warmer_1", + "/_warmer/warmer_1", + }, + { + []string{"*"}, + []string{}, + "warmer_1", + "/%2A/_warmer/warmer_1", + }, + { + []string{}, + []string{"*"}, + "warmer_1", + "/_all/%2A/_warmer/warmer_1", + }, + { + []string{"index-1", "index-2"}, + []string{"type-1", "type-2"}, + "warmer_1", + "/index-1%2Cindex-2/type-1%2Ctype-2/_warmer/warmer_1", + }, + } + + for _, test := range tests { + path, _, err := client.PutWarmer().Index(test.Indices...).Type(test.Types...).Name(test.Name).buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestWarmerLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + mapping := `{ + "query": { + "match_all": {} + } + }` + + // Ensure well prepared test index + client.Flush(testIndexName2).Do() + + putresp, err := client.PutWarmer().Index(testIndexName2).Type("tweet").Name("warmer_1").BodyString(mapping).Do() + if err != nil { + t.Fatalf("expected put warmer to succeed; got: %v", err) + } + if putresp == nil { + t.Fatalf("expected put warmer response; got: %v", putresp) + } + if !putresp.Acknowledged { + t.Fatalf("expected put warmer ack; got: %v", putresp.Acknowledged) + } + + getresp, err := client.GetWarmer().Index(testIndexName2).Name("warmer_1").Do() + if err != nil { + t.Fatalf("expected get warmer to succeed; got: %v", err) + } + if getresp == nil { + t.Fatalf("expected get warmer response; got: %v", getresp) + } + props, ok := getresp[testIndexName2] + if !ok { + 
t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props) + } + + delresp, err := client.DeleteWarmer().Index(testIndexName2).Name("warmer_1").Do() + if err != nil { + t.Fatalf("expected del warmer to succeed; got: %v", err) + } + if delresp == nil { + t.Fatalf("expected del warmer response; got: %v", getresp) + } + if !delresp.Acknowledged { + t.Fatalf("expected del warmer ack; got: %v", delresp.Acknowledged) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_refresh.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_refresh.go new file mode 100644 index 000000000..392a8d393 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_refresh.go @@ -0,0 +1,94 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +type RefreshService struct { + client *Client + indices []string + force *bool + pretty bool +} + +func NewRefreshService(client *Client) *RefreshService { + builder := &RefreshService{ + client: client, + indices: make([]string, 0), + } + return builder +} + +func (s *RefreshService) Index(indices ...string) *RefreshService { + s.indices = append(s.indices, indices...) + return s +} + +func (s *RefreshService) Force(force bool) *RefreshService { + s.force = &force + return s +} + +func (s *RefreshService) Pretty(pretty bool) *RefreshService { + s.pretty = pretty + return s +} + +func (s *RefreshService) Do() (*RefreshResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + path += "/_refresh" + + // Parameters + params := make(url.Values) + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(RefreshResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a refresh request. + +type RefreshResult struct { + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_refresh_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_refresh_test.go new file mode 100644 index 000000000..885e63365 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_refresh_test.go @@ -0,0 +1,47 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
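+
+// Editor's note: a sketch, not upstream code, of the RefreshService defined
+// in indices_refresh.go. The index name is hypothetical:
+//
+//    res, err := client.Refresh("twitter").Do()
+//    if err == nil {
+//        // res.Shards reports how many shards were refreshed
+//    }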
+ +package elastic + +import ( + "testing" +) + +func TestRefresh(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add some documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Refresh indices + res, err := client.Refresh(testIndexName, testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected result; got nil") + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_stats.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_stats.go new file mode 100644 index 000000000..b9255c094 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_stats.go @@ -0,0 +1,385 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// IndicesStatsService provides stats on various metrics of one or more +// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html. +type IndicesStatsService struct { + client *Client + pretty bool + metric []string + index []string + level string + types []string + completionFields []string + fielddataFields []string + fields []string + groups []string + human *bool +} + +// NewIndicesStatsService creates a new IndicesStatsService. +func NewIndicesStatsService(client *Client) *IndicesStatsService { + return &IndicesStatsService{ + client: client, + index: make([]string, 0), + metric: make([]string, 0), + completionFields: make([]string, 0), + fielddataFields: make([]string, 0), + fields: make([]string, 0), + groups: make([]string, 0), + types: make([]string, 0), + } +} + +// Metric limits the information returned the specific metrics. Options are: +// docs, store, indexing, get, search, completion, fielddata, flush, merge, +// query_cache, refresh, suggest, and warmer. +func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService { + s.metric = append(s.metric, metric...) + return s +} + +// Index is the list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types for the `indexing` index metric. +func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService { + s.types = append(s.types, types...) + return s +} + +// Level returns stats aggregated at cluster, index or shard level. 
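+//
+// Editor's note: the accepted values are believed to be "cluster",
+// "indices", and "shards", e.g.:
+//
+//    stats, err := client.IndexStats().Level("shards").Do()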
+func (s *IndicesStatsService) Level(level string) *IndicesStatsService { + s.level = level + return s +} + +// CompletionFields is a list of fields for `fielddata` and `suggest` +// index metric (supports wildcards). +func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService { + s.completionFields = append(s.completionFields, completionFields...) + return s +} + +// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards). +func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService { + s.fielddataFields = append(s.fielddataFields, fielddataFields...) + return s +} + +// Fields is a list of fields for `fielddata` and `completion` index metric +// (supports wildcards). +func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService { + s.fields = append(s.fields, fields...) + return s +} + +// Groups is a list of search groups for `search` index metric. +func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService { + s.groups = append(s.groups, groups...) + return s +} + +// Human indicates whether to return time and byte values in human-readable format.. +func (s *IndicesStatsService) Human(human bool) *IndicesStatsService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesStatsService) buildURL() (string, url.Values, error) { + var err error + var path string + if len(s.index) > 0 && len(s.metric) > 0 { + path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{ + "index": strings.Join(s.index, ","), + "metric": strings.Join(s.metric, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_stats", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.metric) > 0 { + path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{ + "metric": strings.Join(s.metric, ","), + }) + } else { + path = "/_stats" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.groups) > 0 { + params.Set("groups", strings.Join(s.groups, ",")) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.level != "" { + params.Set("level", s.level) + } + if len(s.types) > 0 { + params.Set("types", strings.Join(s.types, ",")) + } + if len(s.completionFields) > 0 { + params.Set("completion_fields", strings.Join(s.completionFields, ",")) + } + if len(s.fielddataFields) > 0 { + params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesStatsService) Validate() error { + return nil +} + +// Do executes the operation. 
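+//
+// A usage sketch (editor's note, not upstream documentation); the index
+// name is hypothetical:
+//
+//    stats, err := client.IndexStats("twitter").Metric("docs", "store").Do()
+//    if err == nil {
+//        if s, found := stats.Indices["twitter"]; found && s.Total != nil && s.Total.Docs != nil {
+//            _ = s.Total.Docs.Count
+//        }
+//    }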
+func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesStatsResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesStatsResponse is the response of IndicesStatsService.Do. +type IndicesStatsResponse struct { + // Shards provides information returned from shards. + Shards shardsInfo `json:"_shards"` + + // All provides summary stats about all indices. + All *IndexStats `json:"_all,omitempty"` + + // Indices provides a map into the stats of an index. The key of the + // map is the index name. + Indices map[string]*IndexStats `json:"indices,omitempty"` +} + +// IndexStats is index stats for a specific index. +type IndexStats struct { + Primaries *IndexStatsDetails `json:"primaries,omitempty"` + Total *IndexStatsDetails `json:"total,omitempty"` +} + +type IndexStatsDetails struct { + Docs *IndexStatsDocs `json:"docs,omitempty"` + Store *IndexStatsStore `json:"store,omitempty"` + Indexing *IndexStatsIndexing `json:"indexing,omitempty"` + Get *IndexStatsGet `json:"get,omitempty"` + Search *IndexStatsSearch `json:"search,omitempty"` + Merges *IndexStatsMerges `json:"merges,omitempty"` + Refresh *IndexStatsRefresh `json:"refresh,omitempty"` + Flush *IndexStatsFlush `json:"flush,omitempty"` + Warmer *IndexStatsWarmer `json:"warmer,omitempty"` + FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"` + IdCache *IndexStatsIdCache `json:"id_cache,omitempty"` + Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"` + Percolate *IndexStatsPercolate `json:"percolate,omitempty"` + Completion *IndexStatsCompletion `json:"completion,omitempty"` + Segments *IndexStatsSegments `json:"segments,omitempty"` + Translog *IndexStatsTranslog `json:"translog,omitempty"` + Suggest *IndexStatsSuggest `json:"suggest,omitempty"` + QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"` +} + +type IndexStatsDocs struct { + Count int64 `json:"count,omitempty"` + Deleted int64 `json:"deleted,omitempty"` +} + +type IndexStatsStore struct { + Size string `json:"size,omitempty"` // human size, e.g. 119.3mb + SizeInBytes int64 `json:"size_in_bytes,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 
0s + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsIndexing struct { + IndexTotal int64 `json:"index_total,omitempty"` + IndexTime string `json:"index_time,omitempty"` + IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"` + IndexCurrent int64 `json:"index_current,omitempty"` + DeleteTotal int64 `json:"delete_total,omitempty"` + DeleteTime string `json:"delete_time,omitempty"` + DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"` + DeleteCurrent int64 `json:"delete_current,omitempty"` + NoopUpdateTotal int64 `json:"noop_update_total,omitempty"` + IsThrottled bool `json:"is_throttled,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsGet struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + ExistsTotal int64 `json:"exists_total,omitempty"` + ExistsTime string `json:"exists_time,omitempty"` + ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"` + MissingTotal int64 `json:"missing_total,omitempty"` + MissingTime string `json:"missing_time,omitempty"` + MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsSearch struct { + OpenContexts int64 `json:"open_contexts,omitempty"` + QueryTotal int64 `json:"query_total,omitempty"` + QueryTime string `json:"query_time,omitempty"` + QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"` + QueryCurrent int64 `json:"query_current,omitempty"` + FetchTotal int64 `json:"fetch_total,omitempty"` + FetchTime string `json:"fetch_time,omitempty"` + FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"` + FetchCurrent int64 `json:"fetch_current,omitempty"` +} + +type IndexStatsMerges struct { + Current int64 `json:"current,omitempty"` + CurrentDocs int64 `json:"current_docs,omitempty"` + CurrentSize string `json:"current_size,omitempty"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` + TotalDocs int64 `json:"total_docs,omitempty"` + TotalSize string `json:"total_size,omitempty"` + TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"` +} + +type IndexStatsRefresh struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFlush struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsWarmer struct { + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFilterCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsIdCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` +} + +type IndexStatsFielddata struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 
`json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsPercolate struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Queries int64 `json:"queries,omitempty"` +} + +type IndexStatsCompletion struct { + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSegments struct { + Count int64 `json:"count,omitempty"` + Memory string `json:"memory,omitempty"` + MemoryInBytes int64 `json:"memory_in_bytes,omitempty"` + IndexWriterMemory string `json:"index_writer_memory,omitempty"` + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"` + IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"` + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"` + VersionMapMemory string `json:"version_map_memory,omitempty"` + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"` + FixedBitSetMemory string `json:"fixed_bit_set,omitempty"` + FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"` +} + +type IndexStatsTranslog struct { + Operations int64 `json:"operations,omitempty"` + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSuggest struct { + Total int64 `json:"total,omitempty"` + Time string `json:"time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsQueryCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` + HitCount int64 `json:"hit_count,omitempty"` + MissCount int64 `json:"miss_count,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_stats_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_stats_test.go new file mode 100644 index 000000000..2a72858d7 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/indices_stats_test.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestIndexStatsBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Indices []string + Metrics []string + Expected string + }{ + { + []string{}, + []string{}, + "/_stats", + }, + { + []string{"index1"}, + []string{}, + "/index1/_stats", + }, + { + []string{}, + []string{"metric1"}, + "/_stats/metric1", + }, + { + []string{"index1"}, + []string{"metric1"}, + "/index1/_stats/metric1", + }, + { + []string{"index1", "index2"}, + []string{"metric1"}, + "/index1%2Cindex2/_stats/metric1", + }, + { + []string{"index1", "index2"}, + []string{"metric1", "metric2"}, + "/index1%2Cindex2/_stats/metric1%2Cmetric2", + }, + } + + for i, test := range tests { + path, _, err := client.IndexStats().Index(test.Indices...).Metric(test.Metrics...).buildURL() + if err != nil { + t.Fatalf("case #%d: %v", i+1, err) + } + if path != test.Expected { + t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) + } + } +} + +func TestIndexStats(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + stats, err := client.IndexStats(testIndexName).Do() + if err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if stats == nil { + t.Fatalf("expected response; got: %v", stats) + } + stat, found := stats.Indices[testIndexName] + if !found { + t.Fatalf("expected stats about index %q; got: %v", testIndexName, found) + } + if stat.Total == nil { + t.Fatalf("expected total to be != nil; got: %v", stat.Total) + } + if stat.Total.Docs == nil { + t.Fatalf("expected total docs to be != nil; got: %v", stat.Total.Docs) + } + if stat.Total.Docs.Count == 0 { + t.Fatalf("expected total docs count to be > 0; got: %d", stat.Total.Docs.Count) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/inner_hit.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/inner_hit.go new file mode 100644 index 000000000..1330df1ee --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/inner_hit.go @@ -0,0 +1,160 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// InnerHit implements a simple join for parent/child, nested, and even +// top-level documents in Elasticsearch. +// It is an experimental feature for Elasticsearch versions 1.5 (or greater). +// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html +// for documentation. +// +// See the tests for SearchSource, HasChildFilter, HasChildQuery, +// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery +// for usage examples. +type InnerHit struct { + source *SearchSource + path string + typ string + + name string +} + +// NewInnerHit creates a new InnerHit. 
+func NewInnerHit() *InnerHit { + return &InnerHit{source: NewSearchSource()} +} + +func (hit *InnerHit) Path(path string) *InnerHit { + hit.path = path + return hit +} + +func (hit *InnerHit) Type(typ string) *InnerHit { + hit.typ = typ + return hit +} + +func (hit *InnerHit) Query(query Query) *InnerHit { + hit.source.Query(query) + return hit +} + +func (hit *InnerHit) From(from int) *InnerHit { + hit.source.From(from) + return hit +} + +func (hit *InnerHit) Size(size int) *InnerHit { + hit.source.Size(size) + return hit +} + +func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit { + hit.source.TrackScores(trackScores) + return hit +} + +func (hit *InnerHit) Explain(explain bool) *InnerHit { + hit.source.Explain(explain) + return hit +} + +func (hit *InnerHit) Version(version bool) *InnerHit { + hit.source.Version(version) + return hit +} + +func (hit *InnerHit) Field(fieldName string) *InnerHit { + hit.source.Field(fieldName) + return hit +} + +func (hit *InnerHit) Fields(fieldNames ...string) *InnerHit { + hit.source.Fields(fieldNames...) + return hit +} + +func (hit *InnerHit) NoFields() *InnerHit { + hit.source.NoFields() + return hit +} + +func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit { + hit.source.FetchSource(fetchSource) + return hit +} + +func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit { + hit.source.FetchSourceContext(fetchSourceContext) + return hit +} + +func (hit *InnerHit) FieldDataFields(fieldDataFields ...string) *InnerHit { + hit.source.FieldDataFields(fieldDataFields...) + return hit +} + +func (hit *InnerHit) FieldDataField(fieldDataField string) *InnerHit { + hit.source.FieldDataField(fieldDataField) + return hit +} + +func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit { + hit.source.ScriptFields(scriptFields...) + return hit +} + +func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit { + hit.source.ScriptField(scriptField) + return hit +} + +func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit { + hit.source.Sort(field, ascending) + return hit +} + +func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit { + hit.source.SortWithInfo(info) + return hit +} + +func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit { + hit.source.SortBy(sorter...) + return hit +} + +func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit { + hit.source.Highlight(highlight) + return hit +} + +func (hit *InnerHit) Highlighter() *Highlight { + return hit.source.Highlighter() +} + +func (hit *InnerHit) Name(name string) *InnerHit { + hit.name = name + return hit +} + +func (hit *InnerHit) Source() (interface{}, error) { + src, err := hit.source.Source() + if err != nil { + return nil, err + } + source, ok := src.(map[string]interface{}) + if !ok { + return nil, nil + } + + // Notice that hit.typ and hit.path are not exported here. + // They are only used with SearchSource and serialized there. + + if hit.name != "" { + source["name"] = hit.name + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/inner_hit_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/inner_hit_test.go new file mode 100644 index 000000000..c4a74dafa --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/inner_hit_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestInnerHitEmpty(t *testing.T) { + hit := NewInnerHit() + src, err := hit.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestInnerHitWithName(t *testing.T) { + hit := NewInnerHit().Name("comments") + src, err := hit.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":"comments"}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/logger.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/logger.go new file mode 100644 index 000000000..0fb16b19f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/logger.go @@ -0,0 +1,10 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Logger specifies the interface for all log operations. +type Logger interface { + Printf(format string, v ...interface{}) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/mget.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/mget.go new file mode 100644 index 000000000..6cc6b8d22 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/mget.go @@ -0,0 +1,219 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" +) + +// MgetService allows to get multiple documents based on an index, +// type (optional) and id (possibly routing). The response includes +// a docs array with all the fetched documents, each element similar +// in structure to a document provided by the Get API. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html +// for details. +type MgetService struct { + client *Client + pretty bool + preference string + realtime *bool + refresh *bool + items []*MultiGetItem +} + +func NewMgetService(client *Client) *MgetService { + builder := &MgetService{ + client: client, + items: make([]*MultiGetItem, 0), + } + return builder +} + +func (b *MgetService) Preference(preference string) *MgetService { + b.preference = preference + return b +} + +func (b *MgetService) Refresh(refresh bool) *MgetService { + b.refresh = &refresh + return b +} + +func (b *MgetService) Realtime(realtime bool) *MgetService { + b.realtime = &realtime + return b +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *MgetService) Pretty(pretty bool) *MgetService { + s.pretty = pretty + return s +} + +func (b *MgetService) Add(items ...*MultiGetItem) *MgetService { + b.items = append(b.items, items...) 
+ return b +} + +func (b *MgetService) Source() (interface{}, error) { + source := make(map[string]interface{}) + items := make([]interface{}, len(b.items)) + for i, item := range b.items { + src, err := item.Source() + if err != nil { + return nil, err + } + items[i] = src + } + source["docs"] = items + return source, nil +} + +func (b *MgetService) Do() (*MgetResponse, error) { + // Build url + path := "/_mget" + + params := make(url.Values) + if b.realtime != nil { + params.Add("realtime", fmt.Sprintf("%v", *b.realtime)) + } + if b.preference != "" { + params.Add("preference", b.preference) + } + if b.refresh != nil { + params.Add("refresh", fmt.Sprintf("%v", *b.refresh)) + } + + // Set body + body, err := b.Source() + if err != nil { + return nil, err + } + + // Get response + res, err := b.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MgetResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Multi Get Item -- + +// MultiGetItem is a single document to retrieve via the MgetService. +type MultiGetItem struct { + index string + typ string + id string + routing string + fields []string + version *int64 // see org.elasticsearch.common.lucene.uid.Versions + versionType string // see org.elasticsearch.index.VersionType + fsc *FetchSourceContext +} + +func NewMultiGetItem() *MultiGetItem { + return &MultiGetItem{} +} + +func (item *MultiGetItem) Index(index string) *MultiGetItem { + item.index = index + return item +} + +func (item *MultiGetItem) Type(typ string) *MultiGetItem { + item.typ = typ + return item +} + +func (item *MultiGetItem) Id(id string) *MultiGetItem { + item.id = id + return item +} + +func (item *MultiGetItem) Routing(routing string) *MultiGetItem { + item.routing = routing + return item +} + +func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem { + if item.fields == nil { + item.fields = make([]string, 0) + } + item.fields = append(item.fields, fields...) + return item +} + +// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1), +// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions. +// The default in Elasticsearch is MatchAny (-3). +func (item *MultiGetItem) Version(version int64) *MultiGetItem { + item.version = &version + return item +} + +// VersionType can be "internal", "external", "external_gt", "external_gte", +// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source. +// It is "internal" by default. +func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem { + item.versionType = versionType + return item +} + +func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem { + item.fsc = fetchSourceContext + return item +} + +// Source returns the serialized JSON to be sent to Elasticsearch as +// part of a MultiGet search. 
+func (item *MultiGetItem) Source() (interface{}, error) { + source := make(map[string]interface{}) + + source["_id"] = item.id + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.fields != nil { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.version != nil { + source["version"] = fmt.Sprintf("%d", *item.version) + } + if item.versionType != "" { + source["version_type"] = item.versionType + } + + return source, nil +} + +// -- Result of a Multi Get request. + +type MgetResponse struct { + Docs []*GetResult `json:"docs,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/mget_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/mget_test.go new file mode 100644 index 000000000..da78e3122 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/mget_test.go @@ -0,0 +1,95 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMultiGet(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add some documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Count documents + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if count != 3 { + t.Errorf("expected Count = %d; got %d", 3, count) + } + + // Get documents 1 and 3 + res, err := client.MultiGet(). + Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")). + Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")). 
+ Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected result to be != nil; got nil") + } + if res.Docs == nil { + t.Fatal("expected result docs to be != nil; got nil") + } + if len(res.Docs) != 2 { + t.Fatalf("expected to have 2 docs; got %d", len(res.Docs)) + } + + item := res.Docs[0] + if item.Error != nil { + t.Errorf("expected no error on item 0; got %v", item.Error) + } + if item.Source == nil { + t.Errorf("expected Source != nil; got %v", item.Source) + } + var doc tweet + if err := json.Unmarshal(*item.Source, &doc); err != nil { + t.Fatalf("expected to unmarshal item Source; got %v", err) + } + if doc.Message != tweet1.Message { + t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message) + } + + item = res.Docs[1] + if item.Error != nil { + t.Errorf("expected no error on item 1; got %v", item.Error) + } + if item.Source == nil { + t.Errorf("expected Source != nil; got %v", item.Source) + } + if err := json.Unmarshal(*item.Source, &doc); err != nil { + t.Fatalf("expected to unmarshal item Source; got %v", err) + } + if doc.Message != tweet3.Message { + t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/msearch.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/msearch.go new file mode 100644 index 000000000..2eb2b550e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/msearch.go @@ -0,0 +1,96 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" +) + +// MultiSearch executes one or more searches in one roundtrip. +// See http://www.elasticsearch.org/guide/reference/api/multi-search/ +type MultiSearchService struct { + client *Client + requests []*SearchRequest + indices []string + pretty bool + routing string + preference string +} + +func NewMultiSearchService(client *Client) *MultiSearchService { + builder := &MultiSearchService{ + client: client, + requests: make([]*SearchRequest, 0), + indices: make([]string, 0), + } + return builder +} + +func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService { + s.requests = append(s.requests, requests...) + return s +} + +func (s *MultiSearchService) Index(indices ...string) *MultiSearchService { + s.indices = append(s.indices, indices...) + return s +} + +func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { + s.pretty = pretty + return s +} + +func (s *MultiSearchService) Do() (*MultiSearchResult, error) { + // Build url + path := "/_msearch" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Set body + lines := make([]string, 0) + for _, sr := range s.requests { + // Set default indices if not specified in the request + if !sr.HasIndices() && len(s.indices) > 0 { + sr = sr.Index(s.indices...) 
+ } + + header, err := json.Marshal(sr.header()) + if err != nil { + return nil, err + } + body, err := json.Marshal(sr.body()) + if err != nil { + return nil, err + } + lines = append(lines, string(header)) + lines = append(lines, string(body)) + } + body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n + + // Get response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MultiSearchResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type MultiSearchResult struct { + Responses []*SearchResult `json:"responses,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/msearch_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/msearch_test.go new file mode 100644 index 000000000..332ade2c6 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/msearch_test.go @@ -0,0 +1,197 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + _ "net/http" + "testing" +) + +func TestMultiSearch(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Spawn two search queries with one roundtrip + q1 := NewMatchAllQuery() + q2 := NewTermQuery("tags", "golang") + + sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2). + Source(NewSearchSource().Query(q1).Size(10)) + sreq2 := NewSearchRequest().Index(testIndexName).Type("tweet"). + Source(NewSearchSource().Query(q2)) + + searchResult, err := client.MultiSearch(). + Add(sreq1, sreq2). 
+ Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Responses == nil { + t.Fatal("expected responses != nil; got nil") + } + if len(searchResult.Responses) != 2 { + t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses)) + } + + sres := searchResult.Responses[0] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } + + sres = searchResult.Responses[1] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 2 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 2 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMultiSearchWithOneRequest(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Spawn two search queries with one roundtrip + query := NewMatchAllQuery() + source := NewSearchSource().Query(query).Size(10) + sreq := NewSearchRequest().Source(source) + + searchResult, err := client.MultiSearch(). + Index(testIndexName). + Add(sreq). 
+ Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Responses == nil { + t.Fatal("expected responses != nil; got nil") + } + if len(searchResult.Responses) != 1 { + t.Fatalf("expected 1 responses; got %d", len(searchResult.Responses)) + } + + sres := searchResult.Responses[0] + if sres.Hits == nil { + t.Errorf("expected Hits != nil; got nil") + } + if sres.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits) + } + if len(sres.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits)) + } + for _, hit := range sres.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/nodes_info.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/nodes_info.go new file mode 100644 index 000000000..8a1c40fa9 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/nodes_info.go @@ -0,0 +1,318 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "strings" + "time" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +var ( + _ = fmt.Print + _ = log.Print + _ = strings.Index + _ = uritemplates.Expand + _ = url.Parse +) + +// NodesInfoService allows to retrieve one or more or all of the +// cluster nodes information. +// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html. +type NodesInfoService struct { + client *Client + pretty bool + nodeId []string + metric []string + flatSettings *bool + human *bool +} + +// NewNodesInfoService creates a new NodesInfoService. +func NewNodesInfoService(client *Client) *NodesInfoService { + return &NodesInfoService{ + client: client, + nodeId: []string{"_all"}, + metric: []string{"_all"}, + } +} + +// NodeId is a list of node IDs or names to limit the returned information. +// Use "_local" to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService { + s.nodeId = append(s.nodeId, nodeId...) + return s +} + +// Metric is a list of metrics you wish returned. Leave empty to return all. +// Valid metrics are: settings, os, process, jvm, thread_pool, network, +// transport, http, and plugins. +func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService { + s.metric = append(s.metric, metric...) + return s +} + +// FlatSettings returns settings in flat format (default: false). +func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService { + s.flatSettings = &flatSettings + return s +} + +// Human indicates whether to return time and byte values in human-readable format. +func (s *NodesInfoService) Human(human bool) *NodesInfoService { + s.human = &human + return s +} + +// Pretty indicates whether to indent the returned JSON. +func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *NodesInfoService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
+		"node_id": strings.Join(s.nodeId, ","),
+		"metric":  strings.Join(s.metric, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.human != nil {
+		params.Set("human", fmt.Sprintf("%v", *s.human))
+	}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *NodesInfoService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(NodesInfoResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// NodesInfoResponse is the response of NodesInfoService.Do.
+type NodesInfoResponse struct {
+	ClusterName string                    `json:"cluster_name"`
+	Nodes       map[string]*NodesInfoNode `json:"nodes"`
+}
+
+type NodesInfoNode struct {
+	// Name of the node, e.g. "Mister Fear"
+	Name string `json:"name"`
+	// TransportAddress, e.g. "127.0.0.1:9300"
+	TransportAddress string `json:"transport_address"`
+	// Host is the host name, e.g. "macbookair"
+	Host string `json:"host"`
+	// IP is the IP address, e.g. "192.168.1.2"
+	IP string `json:"ip"`
+	// Version is the Elasticsearch version running on the node, e.g. "1.4.3"
+	Version string `json:"version"`
+	// Build is the Elasticsearch build, e.g. "36a29a7"
+	Build string `json:"build"`
+	// HTTPAddress, e.g. "127.0.0.1:9200"
+	HTTPAddress string `json:"http_address"`
+	// HTTPSAddress, e.g. "127.0.0.1:9200"
+	HTTPSAddress string `json:"https_address"`
+
+	// Attributes of the node.
+	Attributes map[string]interface{} `json:"attributes"`
+
+	// Settings of the node, e.g. paths and pidfile.
+	Settings map[string]interface{} `json:"settings"`
+
+	// OS information, e.g. CPU and memory.
+	OS *NodesInfoNodeOS `json:"os"`
+
+	// Process information, e.g. max file descriptors.
+	Process *NodesInfoNodeProcess `json:"process"`
+
+	// JVM information, e.g. VM version.
+	JVM *NodesInfoNodeJVM `json:"jvm"`
+
+	// ThreadPool information.
+	ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
+
+	// Network information.
+	Network *NodesInfoNodeNetwork `json:"network"`
+
+	// Transport information.
+	Transport *NodesInfoNodeTransport `json:"transport"`
+
+	// HTTP information.
+	HTTP *NodesInfoNodeHTTP `json:"http"`
+
+	// Plugins information.
+	Plugins []*NodesInfoNodePlugin `json:"plugins"`
+}
+
+type NodesInfoNodeOS struct {
+	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
+	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
+	AvailableProcessors     int    `json:"available_processors"`       // e.g. 4
+
+	// CPU information
+	CPU struct {
+		Vendor           string `json:"vendor"`           // e.g. Intel
+		Model            string `json:"model"`            // e.g. iMac15,1
+		MHz              int    `json:"mhz"`              // e.g. 3500
+		TotalCores       int    `json:"total_cores"`      // e.g.
4 + TotalSockets int `json:"total_sockets"` // e.g. 4 + CoresPerSocket int `json:"cores_per_socket"` // e.g. 16 + CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256 + } `json:"cpu"` + + // Mem information + Mem struct { + Total string `json:"total"` // e.g. 16gb + TotalInBytes int `json:"total_in_bytes"` // e.g. 17179869184 + } `json:"mem"` + + // Swap information + Swap struct { + Total string `json:"total"` // e.g. 1gb + TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824 + } `json:"swap"` +} + +type NodesInfoNodeProcess struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + ID int `json:"id"` // process id, e.g. 87079 + MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768 + Mlockall bool `json:"mlockall"` // e.g. false +} + +type NodesInfoNodeJVM struct { + PID int `json:"pid"` // process id, e.g. 87079 + Version string `json:"version"` // e.g. "1.8.0_25" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. "25.25-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z" + StartTimeInMillis int64 `json:"start_time_in_millis"` + + // Mem information + Mem struct { + HeapInit string `json:"heap_init"` // e.g. 1gb + HeapInitInBytes int `json:"heap_init_in_bytes"` + HeapMax string `json:"heap_max"` // e.g. 4gb + HeapMaxInBytes int `json:"heap_max_in_bytes"` + NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb + NonHeapInitInBytes int `json:"non_heap_init_in_bytes"` + NonHeapMax string `json:"non_heap_max"` // e.g. 0b + NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"` + DirectMax string `json:"direct_max"` // e.g. 4gb + DirectMaxInBytes int `json:"direct_max_in_bytes"` + } `json:"mem"` + + GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"] + MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace"] +} + +type NodesInfoNodeThreadPool struct { + Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"` + Bench *NodesInfoNodeThreadPoolSection `json:"bench"` + Listener *NodesInfoNodeThreadPoolSection `json:"listener"` + Index *NodesInfoNodeThreadPoolSection `json:"index"` + Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"` + Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"` + Generic *NodesInfoNodeThreadPoolSection `json:"generic"` + Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"` + Search *NodesInfoNodeThreadPoolSection `json:"search"` + Flush *NodesInfoNodeThreadPoolSection `json:"flush"` + Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"` + Management *NodesInfoNodeThreadPoolSection `json:"management"` + Get *NodesInfoNodeThreadPoolSection `json:"get"` + Merge *NodesInfoNodeThreadPoolSection `json:"merge"` + Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"` + Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"` +} + +type NodesInfoNodeThreadPoolSection struct { + Type string `json:"type"` // e.g. fixed + Min int `json:"min"` // e.g. 4 + Max int `json:"max"` // e.g. 4 + KeepAlive string `json:"keep_alive"` // e.g. "5m" + QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1 +} + +type NodesInfoNodeNetwork struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 
1000 + PrimaryInterface struct { + Address string `json:"address"` // e.g. 192.168.1.2 + Name string `json:"name"` // e.g. en0 + MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66 + } `json:"primary_interface"` +} + +type NodesInfoNodeTransport struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` + Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"` +} + +type NodesInfoNodeTransportProfile struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` +} + +type NodesInfoNodeHTTP struct { + BoundAddress []string `json:"bound_address"` // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"] + PublishAddress string `json:"publish_address"` // e.g. "127.0.0.1:9300" + MaxContentLength string `json:"max_content_length"` // e.g. "100mb" + MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"` +} + +type NodesInfoNodePlugin struct { + Name string `json:"name"` + Description string `json:"description"` + Site bool `json:"site"` + JVM bool `json:"jvm"` + URL string `json:"url"` // e.g. /_plugin/dummy/ +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/nodes_info_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/nodes_info_test.go new file mode 100644 index 000000000..0402b2706 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/nodes_info_test.go @@ -0,0 +1,40 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestNodesInfo(t *testing.T) { + client, err := NewClient() + if err != nil { + t.Fatal(err) + } + + info, err := client.NodesInfo().Do() + if err != nil { + t.Fatal(err) + } + if info == nil { + t.Fatal("expected nodes info") + } + + if info.ClusterName == "" { + t.Errorf("expected cluster name; got: %q", info.ClusterName) + } + if len(info.Nodes) == 0 { + t.Errorf("expected some nodes; got: %d", len(info.Nodes)) + } + for id, node := range info.Nodes { + if id == "" { + t.Errorf("expected node id; got: %q", id) + } + if node == nil { + t.Fatalf("expected node info; got: %v", node) + } + if node.IP == "" { + t.Errorf("expected node IP; got: %q", node.IP) + } + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/optimize.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/optimize.go new file mode 100644 index 000000000..c9107f714 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/optimize.go @@ -0,0 +1,130 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +type OptimizeService struct { + client *Client + indices []string + maxNumSegments *int + onlyExpungeDeletes *bool + flush *bool + waitForMerge *bool + force *bool + pretty bool +} + +func NewOptimizeService(client *Client) *OptimizeService { + builder := &OptimizeService{ + client: client, + indices: make([]string, 0), + } + return builder +} + +func (s *OptimizeService) Index(indices ...string) *OptimizeService { + s.indices = append(s.indices, indices...) 
+ return s +} + +func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService { + s.maxNumSegments = &maxNumSegments + return s +} + +func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService { + s.onlyExpungeDeletes = &onlyExpungeDeletes + return s +} + +func (s *OptimizeService) Flush(flush bool) *OptimizeService { + s.flush = &flush + return s +} + +func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService { + s.waitForMerge = &waitForMerge + return s +} + +func (s *OptimizeService) Force(force bool) *OptimizeService { + s.force = &force + return s +} + +func (s *OptimizeService) Pretty(pretty bool) *OptimizeService { + s.pretty = pretty + return s +} + +func (s *OptimizeService) Do() (*OptimizeResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + path += "/_optimize" + + // Parameters + params := make(url.Values) + if s.maxNumSegments != nil { + params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments)) + } + if s.onlyExpungeDeletes != nil { + params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) + } + if s.flush != nil { + params.Set("flush", fmt.Sprintf("%v", *s.flush)) + } + if s.waitForMerge != nil { + params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge)) + } + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(OptimizeResult) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of an optimize request. + +type OptimizeResult struct { + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/optimize_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/optimize_test.go new file mode 100644 index 000000000..c47de3a94 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/optimize_test.go @@ -0,0 +1,47 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
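A minimal sketch of driving the OptimizeService defined above (not part of the vendored patch; the wrapper function name is hypothetical and error handling is left to the caller):

package main

import "gopkg.in/olivere/elastic.v3"

// optimizeIndex force-merges the given index down to a single segment
// and flushes afterwards, mirroring the builder methods defined above.
func optimizeIndex(client *elastic.Client, index string) error {
	_, err := client.Optimize(index).
		MaxNumSegments(1).
		Flush(true).
		Do()
	return err
}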
+ +package elastic + +import ( + "testing" +) + +func TestOptimize(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add some documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Optimize documents + res, err := client.Optimize(testIndexName, testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatal("expected result; got nil") + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/percolate.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/percolate.go new file mode 100644 index 000000000..a2bd14ba2 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/percolate.go @@ -0,0 +1,309 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html. +type PercolateService struct { + client *Client + pretty bool + index string + typ string + id string + version interface{} + versionType string + routing []string + preference string + ignoreUnavailable *bool + percolateIndex string + percolatePreference string + percolateRouting string + source string + allowNoIndices *bool + expandWildcards string + percolateFormat string + percolateType string + bodyJson interface{} + bodyString string +} + +// NewPercolateService creates a new PercolateService. +func NewPercolateService(client *Client) *PercolateService { + return &PercolateService{ + client: client, + routing: make([]string, 0), + } +} + +// Index is the name of the index of the document being percolated. +func (s *PercolateService) Index(index string) *PercolateService { + s.index = index + return s +} + +// Type is the type of the document being percolated. +func (s *PercolateService) Type(typ string) *PercolateService { + s.typ = typ + return s +} + +// Id is to substitute the document in the request body with a +// document that is known by the specified id. On top of the id, +// the index and type parameter will be used to retrieve +// the document from within the cluster. +func (s *PercolateService) Id(id string) *PercolateService { + s.id = id + return s +} + +// ExpandWildcards indicates whether to expand wildcard expressions +// to concrete indices that are open, closed or both. +func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService { + s.expandWildcards = expandWildcards + return s +} + +// PercolateFormat indicates whether to return an array of matching +// query IDs instead of objects. 
+func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService { + s.percolateFormat = percolateFormat + return s +} + +// PercolateType is the type to percolate document into. Defaults to type. +func (s *PercolateService) PercolateType(percolateType string) *PercolateService { + s.percolateType = percolateType + return s +} + +// PercolateRouting is the routing value to use when percolating +// the existing document. +func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService { + s.percolateRouting = percolateRouting + return s +} + +// Source is the URL-encoded request definition. +func (s *PercolateService) Source(source string) *PercolateService { + s.source = source + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService { + s.allowNoIndices = &allowNoIndices + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// PercolateIndex is the index to percolate the document into. Defaults to index. +func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService { + s.percolateIndex = percolateIndex + return s +} + +// PercolatePreference defines which shard to prefer when executing +// the percolate request. +func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService { + s.percolatePreference = percolatePreference + return s +} + +// Version is an explicit version number for concurrency control. +func (s *PercolateService) Version(version interface{}) *PercolateService { + s.version = version + return s +} + +// VersionType is the specific version type. +func (s *PercolateService) VersionType(versionType string) *PercolateService { + s.versionType = versionType + return s +} + +// Routing is a list of specific routing values. +func (s *PercolateService) Routing(routing []string) *PercolateService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *PercolateService) Preference(preference string) *PercolateService { + s.preference = preference + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *PercolateService) Pretty(pretty bool) *PercolateService { + s.pretty = pretty + return s +} + +// Doc wraps the given document into the "doc" key of the body. +func (s *PercolateService) Doc(doc interface{}) *PercolateService { + return s.BodyJson(map[string]interface{}{"doc": doc}) +} + +// BodyJson is the percolator request definition using the percolate DSL. +func (s *PercolateService) BodyJson(body interface{}) *PercolateService { + s.bodyJson = body + return s +} + +// BodyString is the percolator request definition using the percolate DSL. +func (s *PercolateService) BodyString(body string) *PercolateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *PercolateService) buildURL() (string, url.Values, error) { + // Build URL + var path string + var err error + if s.id == "" { + path, err = uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } else { + path, err = uritemplates.Expand("/{index}/{type}/{id}/_percolate", map[string]string{ + "index": s.index, + "type": s.typ, + "id": s.id, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.percolateIndex != "" { + params.Set("percolate_index", s.percolateIndex) + } + if s.percolatePreference != "" { + params.Set("percolate_preference", s.percolatePreference) + } + if s.percolateRouting != "" { + params.Set("percolate_routing", s.percolateRouting) + } + if s.source != "" { + params.Set("source", s.source) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.percolateFormat != "" { + params.Set("percolate_format", s.percolateFormat) + } + if s.percolateType != "" { + params.Set("percolate_type", s.percolateType) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *PercolateService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *PercolateService) Do() (*PercolateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PercolateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PercolateResponse is the response of PercolateService.Do. +type PercolateResponse struct { + TookInMillis int64 `json:"took"` // search time in milliseconds + Total int64 `json:"total"` // total matches + Matches []*PercolateMatch `json:"matches,omitempty"` + Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations +} + +// PercolateMatch returns a single match in a PercolateResponse. 
+type PercolateMatch struct {
+	Index string  `json:"_index,omitempty"`
+	Id    string  `json:"_id"`
+	Score float64 `json:"_score,omitempty"`
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/percolate_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/percolate_test.go
new file mode 100644
index 000000000..07b36fef7
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/percolate_test.go
@@ -0,0 +1,92 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestPercolate(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// Add a document
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Register a query in the ".percolator" type.
+	search := NewSearchSource().Query(NewMatchQuery("message", "Golang"))
+	searchSrc, err := search.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().
+		Index(testIndexName).Type(".percolator").Id("1").
+		BodyJson(searchSrc).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Percolate should return our registered query
+	newTweet := tweet{User: "olivere", Message: "Golang is fun."}
+	res, err := client.Percolate().
+		Index(testIndexName).Type("tweet").
+		Doc(newTweet). // shortcut for: BodyJson(map[string]interface{}{"doc": newTweet}).
+		Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if res.Total != 1 {
+		t.Fatalf("expected 1 result; got: %d", res.Total)
+	}
+	if res.Matches == nil {
+		t.Fatalf("expected Matches; got: %v", res.Matches)
+	}
+	matches := res.Matches
+	if matches == nil {
+		t.Fatalf("expected matches as map; got: %v", matches)
+	}
+	if len(matches) != 1 {
+		t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
+	}
+	if matches[0].Id != "1" {
+		t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
+	}
+
+	// Percolating an existing document should return our registered query
+	res, err = client.Percolate().
+		Index(testIndexName).Type("tweet").
+		Id("1").
+		Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if res.Total != 1 {
+		t.Fatalf("expected 1 result; got: %d", res.Total)
+	}
+	if res.Matches == nil {
+		t.Fatalf("expected Matches; got: %v", res.Matches)
+	}
+	matches = res.Matches
+	if matches == nil {
+		t.Fatalf("expected matches as map; got: %v", matches)
+	}
+	if len(matches) != 1 {
+		t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
+	}
+	if matches[0].Id != "1" {
+		t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/ping.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/ping.go
new file mode 100644
index 000000000..fada22817
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/ping.go
@@ -0,0 +1,126 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
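Usage sketch for the percolator API exercised in the test above (not part of the vendored patch; the function and index/type names are illustrative):

package main

import (
	"fmt"

	"gopkg.in/olivere/elastic.v3"
)

// percolateDoc percolates a candidate document against the queries
// registered in the ".percolator" type and prints the matching query IDs.
func percolateDoc(client *elastic.Client, doc interface{}) error {
	res, err := client.Percolate().
		Index("twitter").Type("tweet").
		Doc(doc).
		Do()
	if err != nil {
		return err
	}
	for _, m := range res.Matches {
		fmt.Printf("matched query %s (score %.2f)\n", m.Id, m.Score)
	}
	return nil
}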
+ +package elastic + +import ( + "encoding/json" + "net/http" + "net/url" +) + +// PingService checks if an Elasticsearch server on a given URL is alive. +// When asked for, it can also return various information about the +// Elasticsearch server, e.g. the Elasticsearch version number. +// +// Ping simply starts a HTTP GET request to the URL of the server. +// If the server responds with HTTP Status code 200 OK, the server is alive. +type PingService struct { + client *Client + url string + timeout string + httpHeadOnly bool + pretty bool +} + +// PingResult is the result returned from querying the Elasticsearch server. +type PingResult struct { + Name string `json:"name"` + ClusterName string `json:"cluster_name"` + Version struct { + Number string `json:"number"` + BuildHash string `json:"build_hash"` + BuildTimestamp string `json:"build_timestamp"` + BuildSnapshot bool `json:"build_snapshot"` + LuceneVersion string `json:"lucene_version"` + } `json:"version"` + TagLine string `json:"tagline"` +} + +func NewPingService(client *Client) *PingService { + return &PingService{ + client: client, + url: DefaultURL, + httpHeadOnly: false, + pretty: false, + } +} + +func (s *PingService) URL(url string) *PingService { + s.url = url + return s +} + +func (s *PingService) Timeout(timeout string) *PingService { + s.timeout = timeout + return s +} + +// HeadOnly makes the service to only return the status code in Do; +// the PingResult will be nil. +func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService { + s.httpHeadOnly = httpHeadOnly + return s +} + +func (s *PingService) Pretty(pretty bool) *PingService { + s.pretty = pretty + return s +} + +// Do returns the PingResult, the HTTP status code of the Elasticsearch +// server, and an error. +func (s *PingService) Do() (*PingResult, int, error) { + s.client.mu.RLock() + basicAuth := s.client.basicAuth + basicAuthUsername := s.client.basicAuthUsername + basicAuthPassword := s.client.basicAuthPassword + s.client.mu.RUnlock() + + url_ := s.url + "/" + + params := make(url.Values) + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.pretty { + params.Set("pretty", "1") + } + if len(params) > 0 { + url_ += "?" + params.Encode() + } + + var method string + if s.httpHeadOnly { + method = "HEAD" + } else { + method = "GET" + } + + // Notice: This service must NOT use PerformRequest! + req, err := NewRequest(method, url_) + if err != nil { + return nil, 0, err + } + + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + + res, err := s.client.c.Do((*http.Request)(req)) + if err != nil { + return nil, 0, err + } + defer res.Body.Close() + + var ret *PingResult + if !s.httpHeadOnly { + ret = new(PingResult) + if err := json.NewDecoder(res.Body).Decode(ret); err != nil { + return nil, res.StatusCode, err + } + } + + return ret, res.StatusCode, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/ping_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/ping_test.go new file mode 100644 index 000000000..9891c2025 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/ping_test.go @@ -0,0 +1,64 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
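A liveness-check sketch built on the PingService above (not part of the vendored patch; the wrapper function name is hypothetical):

package main

import (
	"fmt"
	"net/http"

	"gopkg.in/olivere/elastic.v3"
)

// pingNode checks whether a single node answers on the given URL and,
// if so, reports its cluster name and version number.
func pingNode(client *elastic.Client, url string) error {
	res, code, err := client.Ping(url).Do()
	if err != nil {
		return err
	}
	if code != http.StatusOK {
		return fmt.Errorf("unexpected HTTP status %d", code)
	}
	fmt.Printf("cluster %s, version %s\n", res.ClusterName, res.Version.Number)
	return nil
}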
+ +package elastic + +import ( + "net/http" + "testing" +) + +func TestPingGet(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + res, code, err := client.Ping(DefaultURL).Do() + if err != nil { + t.Fatal(err) + } + if code != http.StatusOK { + t.Errorf("expected status code = %d; got %d", http.StatusOK, code) + } + if res == nil { + t.Fatalf("expected to return result, got: %v", res) + } + if res.Name == "" { + t.Errorf("expected Name != \"\"; got %q", res.Name) + } + if res.Version.Number == "" { + t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number) + } +} + +func TestPingHead(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + res, code, err := client.Ping(DefaultURL).HttpHeadOnly(true).Do() + if err != nil { + t.Fatal(err) + } + if code != http.StatusOK { + t.Errorf("expected status code = %d; got %d", http.StatusOK, code) + } + if res != nil { + t.Errorf("expected not to return result, got: %v", res) + } +} + +func TestPingHeadFailure(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + res, code, err := client. + Ping("http://127.0.0.1:9299"). + HttpHeadOnly(true). + Do() + if err == nil { + t.Error("expected error, got nil") + } + if code == http.StatusOK { + t.Errorf("expected status code != %d; got %d", http.StatusOK, code) + } + if res != nil { + t.Errorf("expected not to return result, got: %v", res) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/plugins.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/plugins.go new file mode 100644 index 000000000..3906d74d7 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/plugins.go @@ -0,0 +1,38 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasPlugin indicates whether the cluster has the named plugin. +func (c *Client) HasPlugin(name string) (bool, error) { + plugins, err := c.Plugins() + if err != nil { + return false, nil + } + for _, plugin := range plugins { + if plugin == name { + return true, nil + } + } + return false, nil +} + +// Plugins returns the list of all registered plugins. +func (c *Client) Plugins() ([]string, error) { + stats, err := c.ClusterStats().Do() + if err != nil { + return nil, err + } + if stats == nil { + return nil, err + } + if stats.Nodes == nil { + return nil, err + } + var plugins []string + for _, plugin := range stats.Nodes.Plugins { + plugins = append(plugins, plugin.Name) + } + return plugins, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/plugins_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/plugins_test.go new file mode 100644 index 000000000..112b80943 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/plugins_test.go @@ -0,0 +1,32 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
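+//
+// A minimal usage sketch for the plugin helpers defined in plugins.go; the
+// plugin name below is illustrative:
+//
+//   if ok, err := client.HasPlugin("analysis-icu"); err == nil && ok {
+//       // the cluster has the named plugin installed
+//   }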
+
+package elastic
+
+import "testing"
+
+func TestClientPlugins(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Plugins()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestClientHasPlugin(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ found, err := client.HasPlugin("no-such-plugin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if found {
+ t.Fatalf("expected to not find plugin %q", "no-such-plugin")
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/query.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/query.go
new file mode 100644
index 000000000..0869eaecc
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/query.go
@@ -0,0 +1,13 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Query represents the generic query interface. A query's sole purpose
+// is to return the source of the query as a JSON-serializable object.
+// Returning map[string]interface{} is the norm for queries.
+type Query interface {
+ // Source returns the JSON-serializable query request.
+ Source() (interface{}, error)
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/reindexer.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/reindexer.go
new file mode 100644
index 000000000..7193a1337
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/reindexer.go
@@ -0,0 +1,270 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+// Reindexer simplifies the process of reindexing an index. You typically
+// reindex a source index to a target index. However, you can also specify
+// a query that filters out documents from the source index before bulk
+// indexing them into the target index. The caller may also specify a
+// different client for the target, e.g. when copying indices from one
+// Elasticsearch cluster to another.
+//
+// Internally, the Reindexer uses a scan and scroll operation on the source
+// index and bulk indexing to push data into the target index.
+//
+// By default the reindexer fetches the _source, _parent, and _routing
+// attributes from the source index; the provided CopyToTargetIndex helper
+// copies those attributes into the target index.
+// This behaviour can be overridden by setting the ScanFields and providing a
+// custom ReindexerFunc.
+//
+// The caller is responsible for setting up and/or clearing the target index
+// before starting the reindex process.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+type Reindexer struct {
+ sourceClient, targetClient *Client
+ sourceIndex string
+ query Query
+ scanFields []string
+ bulkSize int
+ size int
+ scroll string
+ reindexerFunc ReindexerFunc
+ progress ReindexerProgressFunc
+ statsOnly bool
+}
+
+// A ReindexerFunc receives each hit from the sourceIndex.
+// It can choose to add any number of BulkableRequests to the bulkService.
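+//
+// A minimal end-to-end sketch using the CopyToTargetIndex helper below;
+// index names are illustrative, and the caller prepares the target index:
+//
+//   r := NewReindexer(client, "source-index", CopyToTargetIndex("target-index"))
+//   res, err := r.Do()
+//   // res.Success and res.Failed hold the bulk totals.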
+type ReindexerFunc func(hit *SearchHit, bulkService *BulkService) error + +// CopyToTargetIndex returns a ReindexerFunc that copies the SearchHit's +// _source, _parent, and _routing attributes into the targetIndex +func CopyToTargetIndex(targetIndex string) ReindexerFunc { + return func(hit *SearchHit, bulkService *BulkService) error { + // TODO(oe) Do we need to deserialize here? + source := make(map[string]interface{}) + if err := json.Unmarshal(*hit.Source, &source); err != nil { + return err + } + req := NewBulkIndexRequest().Index(targetIndex).Type(hit.Type).Id(hit.Id).Doc(source) + if hit.Parent != "" { + req = req.Parent(hit.Parent) + } + if hit.Routing != "" { + req = req.Routing(hit.Routing) + } + bulkService.Add(req) + return nil + } +} + +// ReindexerProgressFunc is a callback that can be used with Reindexer +// to report progress while reindexing data. +type ReindexerProgressFunc func(current, total int64) + +// ReindexerResponse is returned from the Do func in a Reindexer. +// By default, it returns the number of succeeded and failed bulk operations. +// To return details about all failed items, set StatsOnly to false in +// Reindexer. +type ReindexerResponse struct { + Success int64 + Failed int64 + Errors []*BulkResponseItem +} + +// NewReindexer returns a new Reindexer. +func NewReindexer(client *Client, source string, reindexerFunc ReindexerFunc) *Reindexer { + return &Reindexer{ + sourceClient: client, + sourceIndex: source, + reindexerFunc: reindexerFunc, + statsOnly: true, + } +} + +// TargetClient specifies a different client for the target. This is +// necessary when the target index is in a different Elasticsearch cluster. +// By default, the source and target clients are the same. +func (ix *Reindexer) TargetClient(c *Client) *Reindexer { + ix.targetClient = c + return ix +} + +// Query specifies the query to apply to the source. It filters out those +// documents to be indexed into target. A nil query does not filter out any +// documents. +func (ix *Reindexer) Query(q Query) *Reindexer { + ix.query = q + return ix +} + +// ScanFields specifies the fields the scan query should load. +// The default fields are _source, _parent, _routing. +func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer { + ix.scanFields = scanFields + return ix +} + +// BulkSize returns the number of documents to send to Elasticsearch per chunk. +// The default is 500. +func (ix *Reindexer) BulkSize(bulkSize int) *Reindexer { + ix.bulkSize = bulkSize + return ix +} + +// Size is the number of results to return per shard, not per request. +// So a size of 10 which hits 5 shards will return a maximum of 50 results +// per scan request. +func (ix *Reindexer) Size(size int) *Reindexer { + ix.size = size + return ix +} + +// Scroll specifies for how long the scroll operation on the source index +// should be maintained. The default is 5m. +func (ix *Reindexer) Scroll(timeout string) *Reindexer { + ix.scroll = timeout + return ix +} + +// Progress indicates a callback that will be called while indexing. +func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer { + ix.progress = f + return ix +} + +// StatsOnly indicates whether the Do method should return details e.g. about +// the documents that failed while indexing. It is true by default, i.e. only +// the number of documents that succeeded/failed are returned. Set to false +// if you want all the details. 
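+// The Success and Failed totals in the response are collected in either mode.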
+func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer {
+ ix.statsOnly = statsOnly
+ return ix
+}
+
+// Do starts the reindexing process.
+func (ix *Reindexer) Do() (*ReindexerResponse, error) {
+ if ix.sourceClient == nil {
+ return nil, errors.New("no source client")
+ }
+ if ix.sourceIndex == "" {
+ return nil, errors.New("no source index")
+ }
+ if ix.targetClient == nil {
+ ix.targetClient = ix.sourceClient
+ }
+ if ix.scanFields == nil {
+ ix.scanFields = []string{"_source", "_parent", "_routing"}
+ }
+ if ix.bulkSize <= 0 {
+ ix.bulkSize = 500
+ }
+ if ix.scroll == "" {
+ ix.scroll = "5m"
+ }
+
+ // Count total to report progress (if necessary)
+ var err error
+ var current, total int64
+ if ix.progress != nil {
+ total, err = ix.count()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Prepare scan and scroll to iterate through the source index
+ scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll).Fields(ix.scanFields...)
+ if ix.query != nil {
+ scanner = scanner.Query(ix.query)
+ }
+ if ix.size > 0 {
+ scanner = scanner.Size(ix.size)
+ }
+ cursor, err := scanner.Do()
+ if err != nil {
+ return nil, err
+ }
+
+ bulk := ix.targetClient.Bulk()
+
+ ret := &ReindexerResponse{
+ Errors: make([]*BulkResponseItem, 0),
+ }
+
+ // Main loop iterates through the source index and bulk indexes into target.
+ for {
+ docs, err := cursor.Next()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ return ret, err
+ }
+
+ if docs.TotalHits() > 0 {
+ for _, hit := range docs.Hits.Hits {
+ if ix.progress != nil {
+ current++
+ ix.progress(current, total)
+ }
+
+ err := ix.reindexerFunc(hit, bulk)
+ if err != nil {
+ return ret, err
+ }
+
+ if bulk.NumberOfActions() >= ix.bulkSize {
+ bulk, err = ix.commit(bulk, ret)
+ if err != nil {
+ return ret, err
+ }
+ }
+ }
+ }
+ }
+
+ // Final flush
+ if bulk.NumberOfActions() > 0 {
+ bulk, err = ix.commit(bulk, ret)
+ if err != nil {
+ return ret, err
+ }
+ bulk = nil
+ }
+
+ return ret, nil
+}
+
+// count returns the number of documents in the source index.
+// The query is taken into account, if specified.
+func (ix *Reindexer) count() (int64, error) {
+ service := ix.sourceClient.Count(ix.sourceIndex)
+ if ix.query != nil {
+ service = service.Query(ix.query)
+ }
+ return service.Do()
+}
+
+// commit commits a bulk, updates the stats, and returns a fresh bulk service.
+func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) {
+ bres, err := bulk.Do()
+ if err != nil {
+ return nil, err
+ }
+ ret.Success += int64(len(bres.Succeeded()))
+ failed := bres.Failed()
+ ret.Failed += int64(len(failed))
+ if !ix.statsOnly {
+ ret.Errors = append(ret.Errors, failed...)
+ } + bulk = ix.targetClient.Bulk() + return bulk, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/reindexer_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/reindexer_test.go new file mode 100644 index 000000000..a21dff5c5 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/reindexer_test.go @@ -0,0 +1,285 @@ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestReindexer(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + sourceCount, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + targetCount, err := client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != 0 { + t.Fatalf("expected %d documents; got: %d", 0, targetCount) + } + + r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + if _, err := client.Flush().Index(testIndexName2).Do(); err != nil { + t.Fatal(err) + } + + targetCount, err = client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != sourceCount { + t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) + } +} + +func TestReindexerWithQuery(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + q := NewTermQuery("user", "olivere") + + sourceCount, err := client.Count(testIndexName).Query(q).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + targetCount, err := client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != 0 { + t.Fatalf("expected %d documents; got: %d", 0, targetCount) + } + + r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) + r = r.Query(q) + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + if _, err := client.Flush().Index(testIndexName2).Do(); err != nil { + t.Fatal(err) + } + + targetCount, err = client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != sourceCount { + t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) + } +} + +func TestReindexerProgress(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + sourceCount, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + var calls int64 + totalsOk := true + progress := func(current, total int64) { + calls += 1 + totalsOk = totalsOk && total == sourceCount + } + + r := 
NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) + r = r.Progress(progress) + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + if calls != sourceCount { + t.Errorf("expected progress to be called %d times; got: %d", sourceCount, calls) + } + if !totalsOk { + t.Errorf("expected totals in progress to be %d", sourceCount) + } +} + +func TestReindexerWithTargetClient(t *testing.T) { + sourceClient := setupTestClientAndCreateIndexAndAddDocs(t) + targetClient, err := NewClient() + if err != nil { + t.Fatal(err) + } + + sourceCount, err := sourceClient.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + targetCount, err := targetClient.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != 0 { + t.Fatalf("expected %d documents; got: %d", 0, targetCount) + } + + r := NewReindexer(sourceClient, testIndexName, CopyToTargetIndex(testIndexName2)) + r = r.TargetClient(targetClient) + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + if _, err := targetClient.Flush().Index(testIndexName2).Do(); err != nil { + t.Fatal(err) + } + + targetCount, err = targetClient.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != sourceCount { + t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) + } +} + +// TestReindexerPreservingTTL shows how a caller can take control of the +// copying process by providing ScanFields and a custom ReindexerFunc. 
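+// The func below reads each hit's _ttl field and re-applies it through the
+// bulk index request's Ttl option, which the default CopyToTargetIndex skips.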
+func TestReindexerPreservingTTL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").TTL("999999s").Version(10).VersionType("external").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + sourceCount, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + if sourceCount <= 0 { + t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) + } + + targetCount, err := client.Count(testIndexName2).Do() + if err != nil { + t.Fatal(err) + } + if targetCount != 0 { + t.Fatalf("expected %d documents; got: %d", 0, targetCount) + } + + // Carries over the source item's ttl to the reindexed item + copyWithTTL := func(hit *SearchHit, bulkService *BulkService) error { + source := make(map[string]interface{}) + if err := json.Unmarshal(*hit.Source, &source); err != nil { + return err + } + req := NewBulkIndexRequest().Index(testIndexName2).Type(hit.Type).Id(hit.Id).Doc(source) + if hit.TTL > 0 { + req = req.Ttl(hit.TTL) + } + bulkService.Add(req) + return nil + } + + r := NewReindexer(client, testIndexName, copyWithTTL).ScanFields("_source", "_ttl") + + ret, err := r.Do() + if err != nil { + t.Fatal(err) + } + if ret == nil { + t.Fatalf("expected result != %v; got: %v", nil, ret) + } + if ret.Success != sourceCount { + t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) + } + if ret.Failed != 0 { + t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) + } + if len(ret.Errors) != 0 { + t.Errorf("expected to return no errors by default; got: %v", ret.Errors) + } + + getResult, err := client.Get().Index(testIndexName2).Id("1").Fields("_source", "_ttl").Do() + if err != nil { + t.Fatal(err) + } + + if getResult.TTL <= 0 { + t.Errorf("expected TTL field in reindexed document") + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/request.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/request.go new file mode 100644 index 000000000..1347e1b6f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/request.go @@ -0,0 +1,123 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "runtime" + "strings" +) + +// Elasticsearch-specific HTTP request +type Request http.Request + +// NewRequest is a http.Request and adds features such as encoding the body. +func NewRequest(method, url string) (*Request, error) { + req, err := http.NewRequest(method, url, nil) + if err != nil { + return nil, err + } + req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")") + req.Header.Add("Accept", "application/json") + return (*Request)(req), nil +} + +// SetBasicAuth wraps http.Request's SetBasicAuth. +func (r *Request) SetBasicAuth(username, password string) { + ((*http.Request)(r)).SetBasicAuth(username, password) +} + +// SetBody encodes the body in the request. Optionally, it performs GZIP compression. 
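+// A minimal sketch; the URL and body below are illustrative:
+//
+//   req, _ := NewRequest("POST", "http://127.0.0.1:9200/twitter/_search")
+//   _ = req.SetBody(`{"query":{"match_all":{}}}`, true) // gzip-compressed string body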
+func (r *Request) SetBody(body interface{}, gzipCompress bool) error { + switch b := body.(type) { + case string: + if gzipCompress { + return r.setBodyGzip(b) + } else { + return r.setBodyString(b) + } + default: + if gzipCompress { + return r.setBodyGzip(body) + } else { + return r.setBodyJson(body) + } + } +} + +// setBodyJson encodes the body as a struct to be marshaled via json.Marshal. +func (r *Request) setBodyJson(data interface{}) error { + body, err := json.Marshal(data) + if err != nil { + return err + } + r.Header.Set("Content-Type", "application/json") + r.setBodyReader(bytes.NewReader(body)) + return nil +} + +// setBodyString encodes the body as a string. +func (r *Request) setBodyString(body string) error { + return r.setBodyReader(strings.NewReader(body)) +} + +// setBodyGzip gzip's the body. It accepts both strings and structs as body. +// The latter will be encoded via json.Marshal. +func (r *Request) setBodyGzip(body interface{}) error { + switch b := body.(type) { + case string: + buf := new(bytes.Buffer) + w := gzip.NewWriter(buf) + if _, err := w.Write([]byte(b)); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + r.Header.Add("Content-Encoding", "gzip") + r.Header.Add("Vary", "Accept-Encoding") + return r.setBodyReader(bytes.NewReader(buf.Bytes())) + default: + data, err := json.Marshal(b) + if err != nil { + return err + } + buf := new(bytes.Buffer) + w := gzip.NewWriter(buf) + if _, err := w.Write(data); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + r.Header.Add("Content-Encoding", "gzip") + r.Header.Add("Vary", "Accept-Encoding") + r.Header.Set("Content-Type", "application/json") + return r.setBodyReader(bytes.NewReader(buf.Bytes())) + } +} + +// setBodyReader writes the body from an io.Reader. +func (r *Request) setBodyReader(body io.Reader) error { + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + r.Body = rc + if body != nil { + switch v := body.(type) { + case *strings.Reader: + r.ContentLength = int64(v.Len()) + case *bytes.Buffer: + r.ContentLength = int64(v.Len()) + } + } + return nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/rescore.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/rescore.go new file mode 100644 index 000000000..0cbc06710 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/rescore.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
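+//
+// A usage sketch for the rescorer types defined in this file and in
+// rescorer.go; the query, window size, and weight are illustrative:
+//
+//   resc := NewRescore().
+//       WindowSize(50).
+//       Rescorer(NewQueryRescorer(NewMatchQuery("message", "golang")).QueryWeight(0.7))
+//   src, err := resc.Source() // ready to be embedded under "rescore" in a search body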
+ +package elastic + +type Rescore struct { + rescorer Rescorer + windowSize *int + defaultRescoreWindowSize *int +} + +func NewRescore() *Rescore { + return &Rescore{} +} + +func (r *Rescore) WindowSize(windowSize int) *Rescore { + r.windowSize = &windowSize + return r +} + +func (r *Rescore) IsEmpty() bool { + return r.rescorer == nil +} + +func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore { + r.rescorer = rescorer + return r +} + +func (r *Rescore) Source() (interface{}, error) { + source := make(map[string]interface{}) + if r.windowSize != nil { + source["window_size"] = *r.windowSize + } else if r.defaultRescoreWindowSize != nil { + source["window_size"] = *r.defaultRescoreWindowSize + } + rescorerSrc, err := r.rescorer.Source() + if err != nil { + return nil, err + } + source[r.rescorer.Name()] = rescorerSrc + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/rescorer.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/rescorer.go new file mode 100644 index 000000000..28ad59cbb --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/rescorer.go @@ -0,0 +1,64 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +type Rescorer interface { + Name() string + Source() (interface{}, error) +} + +// -- Query Rescorer -- + +type QueryRescorer struct { + query Query + rescoreQueryWeight *float64 + queryWeight *float64 + scoreMode string +} + +func NewQueryRescorer(query Query) *QueryRescorer { + return &QueryRescorer{ + query: query, + } +} + +func (r *QueryRescorer) Name() string { + return "query" +} + +func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer { + r.rescoreQueryWeight = &rescoreQueryWeight + return r +} + +func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer { + r.queryWeight = &queryWeight + return r +} + +func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer { + r.scoreMode = scoreMode + return r +} + +func (r *QueryRescorer) Source() (interface{}, error) { + rescoreQuery, err := r.query.Source() + if err != nil { + return nil, err + } + + source := make(map[string]interface{}) + source["rescore_query"] = rescoreQuery + if r.queryWeight != nil { + source["query_weight"] = *r.queryWeight + } + if r.rescoreQueryWeight != nil { + source["rescore_query_weight"] = *r.rescoreQueryWeight + } + if r.scoreMode != "" { + source["score_mode"] = r.scoreMode + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/response.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/response.go new file mode 100644 index 000000000..9426c23af --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/response.go @@ -0,0 +1,43 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "io/ioutil" + "net/http" +) + +// Response represents a response from Elasticsearch. +type Response struct { + // StatusCode is the HTTP status code, e.g. 200. + StatusCode int + // Header is the HTTP header from the HTTP response. + // Keys in the map are canonicalized (see http.CanonicalHeaderKey). + Header http.Header + // Body is the deserialized response body. 
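+ // It is left empty when the response carries no content (e.g. for HEAD requests).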
+ Body json.RawMessage +} + +// newResponse creates a new response from the HTTP response. +func (c *Client) newResponse(res *http.Response) (*Response, error) { + r := &Response{ + StatusCode: res.StatusCode, + Header: res.Header, + } + if res.Body != nil { + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + // HEAD requests return a body but no content + if len(slurp) > 0 { + if err := c.decoder.Decode(slurp, &r.Body); err != nil { + return nil, err + } + } + } + return r, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scan.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scan.go new file mode 100644 index 000000000..08822531b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scan.go @@ -0,0 +1,359 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +const ( + defaultKeepAlive = "5m" +) + +var ( + // End of stream (or scan) + EOS = errors.New("EOS") + + // No ScrollId + ErrNoScrollId = errors.New("no scrollId") +) + +// ScanService manages a cursor through documents in Elasticsearch. +type ScanService struct { + client *Client + indices []string + types []string + keepAlive string + searchSource *SearchSource + pretty bool + routing string + preference string + size *int +} + +// NewScanService creates a new service to iterate through the results +// of a query. +func NewScanService(client *Client) *ScanService { + builder := &ScanService{ + client: client, + searchSource: NewSearchSource().Query(NewMatchAllQuery()), + } + return builder +} + +// Index sets the name(s) of the index to use for scan. +func (s *ScanService) Index(indices ...string) *ScanService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +// Types allows to restrict the scan to a list of types. +func (s *ScanService) Type(types ...string) *ScanService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Scroll is an alias for KeepAlive, the time to keep +// the cursor alive (e.g. "5m" for 5 minutes). +func (s *ScanService) Scroll(keepAlive string) *ScanService { + s.keepAlive = keepAlive + return s +} + +// KeepAlive sets the maximum time the cursor will be +// available before expiration (e.g. "5m" for 5 minutes). +func (s *ScanService) KeepAlive(keepAlive string) *ScanService { + s.keepAlive = keepAlive + return s +} + +// Fields tells Elasticsearch to only load specific fields from a search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html. +func (s *ScanService) Fields(fields ...string) *ScanService { + s.searchSource = s.searchSource.Fields(fields...) + return s +} + +// SearchSource sets the search source builder to use with this service. +func (s *ScanService) SearchSource(searchSource *SearchSource) *ScanService { + s.searchSource = searchSource + if s.searchSource == nil { + s.searchSource = NewSearchSource().Query(NewMatchAllQuery()) + } + return s +} + +// Routing allows for (a comma-separated) list of specific routing values. 
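+// For example, Routing("user1", "user2") is sent as routing=user1,user2.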
+func (s *ScanService) Routing(routings ...string) *ScanService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: "random"). +func (s *ScanService) Preference(preference string) *ScanService { + s.preference = preference + return s +} + +// Query sets the query to perform, e.g. MatchAllQuery. +func (s *ScanService) Query(query Query) *ScanService { + s.searchSource = s.searchSource.Query(query) + return s +} + +// PostFilter is executed as the last filter. It only affects the +// search hits but not facets. See +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html +// for details. +func (s *ScanService) PostFilter(postFilter Query) *ScanService { + s.searchSource = s.searchSource.PostFilter(postFilter) + return s +} + +// FetchSource indicates whether the response should contain the stored +// _source for every hit. +func (s *ScanService) FetchSource(fetchSource bool) *ScanService { + s.searchSource = s.searchSource.FetchSource(fetchSource) + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *ScanService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScanService { + s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) + return s +} + +// Version can be set to true to return a version for each search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html. +func (s *ScanService) Version(version bool) *ScanService { + s.searchSource = s.searchSource.Version(version) + return s +} + +// Sort the results by the given field, in the given order. +// Use the alternative SortWithInfo to use a struct to define the sorting. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html +// for detailed documentation of sorting. +func (s *ScanService) Sort(field string, ascending bool) *ScanService { + s.searchSource = s.searchSource.Sort(field, ascending) + return s +} + +// SortWithInfo defines how to sort results. +// Use the Sort func for a shortcut. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html +// for detailed documentation of sorting. +func (s *ScanService) SortWithInfo(info SortInfo) *ScanService { + s.searchSource = s.searchSource.SortWithInfo(info) + return s +} + +// SortBy defines how to sort results. +// Use the Sort func for a shortcut. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html +// for detailed documentation of sorting. +func (s *ScanService) SortBy(sorter ...Sorter) *ScanService { + s.searchSource = s.searchSource.SortBy(sorter...) + return s +} + +// Pretty enables the caller to indent the JSON output. +func (s *ScanService) Pretty(pretty bool) *ScanService { + s.pretty = pretty + return s +} + +// Size is the number of results to return per shard, not per request. +// So a size of 10 which hits 5 shards will return a maximum of 50 results +// per scan request. +func (s *ScanService) Size(size int) *ScanService { + s.size = &size + return s +} + +// Do executes the query and returns a "server-side cursor". 
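+// Iterate with the returned cursor's Next method until it returns EOS.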
+func (s *ScanService) Do() (*ScanCursor, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + // Types + typesPart := make([]string, 0) + for _, typ := range s.types { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": typ, + }) + if err != nil { + return nil, err + } + typesPart = append(typesPart, typ) + } + if len(typesPart) > 0 { + path += "/" + strings.Join(typesPart, ",") + } + + // Search + path += "/_search" + + // Parameters + params := make(url.Values) + if !s.searchSource.hasSort() { + // TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated. + params.Set("search_type", "scan") + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.keepAlive != "" { + params.Set("scroll", s.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + if s.size != nil && *s.size > 0 { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + + // Get response + body, err := s.searchSource.Source() + if err != nil { + return nil, err + } + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + searchResult := new(SearchResult) + if err := json.Unmarshal(res.Body, searchResult); err != nil { + return nil, err + } + + cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult) + + return cursor, nil +} + +// scanCursor represents a single page of results from +// an Elasticsearch Scan operation. +type ScanCursor struct { + Results *SearchResult + + client *Client + keepAlive string + pretty bool + currentPage int +} + +// newScanCursor returns a new initialized instance +// of scanCursor. +func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor { + return &ScanCursor{ + client: client, + keepAlive: keepAlive, + pretty: pretty, + Results: searchResult, + } +} + +// TotalHits is a convenience method that returns the number +// of hits the cursor will iterate through. +func (c *ScanCursor) TotalHits() int64 { + if c.Results.Hits == nil { + return 0 + } + return c.Results.Hits.TotalHits +} + +// Next returns the next search result or nil when all +// documents have been scanned. 
+// +// Usage: +// +// for { +// res, err := cursor.Next() +// if err == elastic.EOS { +// // End of stream (or scan) +// break +// } +// if err != nil { +// // Handle error +// } +// // Work with res +// } +// +func (c *ScanCursor) Next() (*SearchResult, error) { + if c.currentPage > 0 { + if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 { + return nil, EOS + } + } + if c.Results.ScrollId == "" { + return nil, EOS + } + + // Build url + path := "/_search/scroll" + + // Parameters + params := make(url.Values) + if c.pretty { + params.Set("pretty", fmt.Sprintf("%v", c.pretty)) + } + if c.keepAlive != "" { + params.Set("scroll", c.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + + // Set body + body := c.Results.ScrollId + + // Get response + res, err := c.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + c.Results = &SearchResult{ScrollId: body} + if err := json.Unmarshal(res.Body, c.Results); err != nil { + return nil, err + } + + c.currentPage += 1 + + return c.Results, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scan_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scan_test.go new file mode 100644 index 000000000..b2a8f0ef9 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scan_test.go @@ -0,0 +1,559 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + _ "net/http" + "testing" +) + +func TestScan(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + cursor, err := client.Scan(testIndexName).Size(1).Do() + if err != nil { + t.Fatal(err) + } + + if cursor.Results == nil { + t.Errorf("expected results != nil; got nil") + } + if cursor.Results.Hits == nil { + t.Errorf("expected results.Hits != nil; got nil") + } + if cursor.Results.Hits.TotalHits != 3 { + t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) + } + if len(cursor.Results.Hits.Hits) != 0 { + t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) + } + + pages := 0 + numDocs := 0 + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + pages += 1 + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + 
numDocs += 1 + } + } + + if pages <= 0 { + t.Errorf("expected to retrieve at least 1 page; got %d", pages) + } + + if numDocs != 3 { + t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs) + } +} + +func TestScanWithSort(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // We sort on a numerical field, because sorting on the 'message' string field would + // raise the whole question of tokenizing and analyzing. + cursor, err := client.Scan(testIndexName).Sort("retweets", true).Size(1).Do() + if err != nil { + t.Fatal(err) + } + + if cursor.Results == nil { + t.Errorf("expected results != nil; got nil") + } + if cursor.Results.Hits == nil { + t.Errorf("expected results.Hits != nil; got nil") + } + if cursor.Results.Hits.TotalHits != 3 { + t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) + } + if len(cursor.Results.Hits.Hits) != 1 { + t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 1, len(cursor.Results.Hits.Hits)) + } + + if cursor.Results.Hits.Hits[0].Id != "3" { + t.Errorf("expected hitID = %v; got %v", "3", cursor.Results.Hits.Hits[0].Id) + } + + numDocs := 1 // The cursor already gave us a result + pages := 0 + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + pages += 1 + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + numDocs += 1 + } + } + + if pages <= 0 { + t.Errorf("expected to retrieve at least 1 page; got %d", pages) + } + + if numDocs != 3 { + t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs) + } +} + +func TestScanWithSortByDoc(t *testing.T) { + // Sorting by doc is introduced in Elasticsearch 2.1, + // and replaces the deprecated search_type=scan. 
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated + client := setupTestClientAndCreateIndex(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "2.1" { + t.Skipf(`Elasticsearch %s does not have {"sort":["_doc"]}`, esversion) + return + } + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + comment1 := comment{User: "nico", Comment: "You bet."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + cursor, err := client.Scan(testIndexName).Sort("_doc", true).Size(1).Do() + if err != nil { + t.Fatal(err) + } + + numDocs := 0 + pages := 0 + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + pages += 1 + + for range searchResult.Hits.Hits { + numDocs += 1 + } + } + + if pages != 3 { + t.Errorf("expected to retrieve %d pages; got %d", 2, pages) + } + if numDocs != 2 { + t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs) + } +} + +func TestScanWithSearchSource(t *testing.T) { + //client := setupTestClientAndCreateIndexAndLog(t) + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + src := NewSearchSource(). + Query(NewTermQuery("user", "olivere")). 
+ FetchSourceContext(NewFetchSourceContext(true).Include("retweets")) + cursor, err := client.Scan(testIndexName).SearchSource(src).Size(1).Do() + if err != nil { + t.Fatal(err) + } + + if cursor.Results == nil { + t.Fatalf("expected results != nil; got nil") + } + if cursor.Results.Hits == nil { + t.Fatalf("expected results.Hits != nil; got nil") + } + if cursor.Results.Hits.TotalHits != 2 { + t.Fatalf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits) + } + + numDocs := 0 + pages := 0 + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + pages += 1 + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + if _, found := item["message"]; found { + t.Fatalf("expected to not see field %q; got: %#v", "message", item) + } + numDocs += 1 + } + } + + if pages != 3 { + t.Errorf("expected to retrieve %d pages; got %d", 2, pages) + } + if numDocs != 2 { + t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs) + } +} + +func TestScanWithQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Return tweets from olivere only + termQuery := NewTermQuery("user", "olivere") + cursor, err := client.Scan(testIndexName). + Size(1). + Query(termQuery). 
+ Do() + if err != nil { + t.Fatal(err) + } + + if cursor.Results == nil { + t.Errorf("expected results != nil; got nil") + } + if cursor.Results.Hits == nil { + t.Errorf("expected results.Hits != nil; got nil") + } + if cursor.Results.Hits.TotalHits != 2 { + t.Errorf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits) + } + if len(cursor.Results.Hits.Hits) != 0 { + t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) + } + + pages := 0 + numDocs := 0 + + for { + searchResult, err := cursor.Next() + if err == EOS { + break + } + if err != nil { + t.Fatal(err) + } + + pages += 1 + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + numDocs += 1 + } + } + + if pages <= 0 { + t.Errorf("expected to retrieve at least 1 page; got %d", pages) + } + + if numDocs != 2 { + t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs) + } +} + +func TestScanAndScrollWithMissingIndex(t *testing.T) { + client := setupTestClient(t) // does not create testIndexName + + cursor, err := client.Scan(testIndexName).Scroll("30s").Do() + if err == nil { + t.Fatalf("expected error != nil; got: %v", err) + } + if cursor != nil { + t.Fatalf("expected cursor == nil; got: %v", cursor) + } +} + +func TestScanAndScrollWithEmptyIndex(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + if isTravis() { + t.Skip("test on Travis failes regularly with " + + "Error 503 (Service Unavailable): SearchPhaseExecutionException[Failed to execute phase [init_scan], all shards failed]") + } + + _, err := client.Flush().Index(testIndexName).WaitIfOngoing(true).Do() + if err != nil { + t.Fatal(err) + } + + cursor, err := client.Scan(testIndexName).Scroll("30s").Do() + if err != nil { + t.Fatal(err) + } + if cursor == nil { + t.Fatalf("expected cursor; got: %v", cursor) + } + + // First request returns no error, but no hits + res, err := cursor.Next() + if err != nil { + t.Fatal(err) + } + if res == nil { + t.Fatalf("expected results != nil; got: nil") + } + if res.ScrollId == "" { + t.Errorf("expected scrollId in results; got: %q", res.ScrollId) + } + if res.TotalHits() != 0 { + t.Errorf("expected TotalHits() = %d; got %d", 0, res.TotalHits()) + } + if res.Hits == nil { + t.Errorf("expected results.Hits != nil; got: nil") + } + if res.Hits.TotalHits != 0 { + t.Errorf("expected results.Hits.TotalHits = %d; got %d", 0, res.Hits.TotalHits) + } + if res.Hits.Hits == nil { + t.Errorf("expected results.Hits.Hits != nil; got: %v", res.Hits.Hits) + } + if len(res.Hits.Hits) != 0 { + t.Errorf("expected len(results.Hits.Hits) == %d; got: %d", 0, len(res.Hits.Hits)) + } + + // Subsequent requests return EOS + res, err = cursor.Next() + if err != EOS { + t.Fatal(err) + } + if res != nil { + t.Fatalf("expected results == %v; got: %v", nil, res) + } + + res, err = cursor.Next() + if err != EOS { + t.Fatal(err) + } + if res != nil { + t.Fatalf("expected results == %v; got: %v", nil, res) + } +} + +func TestScanIssue119(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + comment1 := comment{User: "nico", Comment: "You bet."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + + _, err := 
client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ cursor, err := client.Scan(testIndexName).Fields("_source", "_parent").Size(1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for {
+ searchResult, err := cursor.Next()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Type == "tweet" {
+ if _, ok := hit.Fields["_parent"].(string); ok {
+ t.Errorf("Type `tweet` cannot have any parent...")
+
+ toPrint, _ := json.MarshalIndent(hit, "", " ")
+ t.Fatal(string(toPrint))
+ }
+ }
+
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/script.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/script.go
new file mode 100644
index 000000000..a5c9e45e2
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/script.go
@@ -0,0 +1,131 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// Script holds all the parameters necessary to compile or find in cache
+// and then execute a script.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html
+// for details of scripting.
+type Script struct {
+ script string
+ typ string
+ lang string
+ params map[string]interface{}
+}
+
+// NewScript creates and initializes a new Script.
+func NewScript(script string) *Script {
+ return &Script{
+ script: script,
+ typ: "", // default type is "inline"
+ params: make(map[string]interface{}),
+ }
+}
+
+// NewScriptInline creates and initializes a new Script of type "inline".
+func NewScriptInline(script string) *Script {
+ return NewScript(script).Type("inline")
+}
+
+// NewScriptId creates and initializes a new Script of type "id".
+func NewScriptId(script string) *Script {
+ return NewScript(script).Type("id")
+}
+
+// NewScriptFile creates and initializes a new Script of type "file".
+func NewScriptFile(script string) *Script {
+ return NewScript(script).Type("file")
+}
+
+// Script is either the cache key of the script to be compiled/executed
+// or the actual script source code for inline scripts. For indexed
+// scripts this is the id used in the request. For file scripts this is
+// the file name.
+func (s *Script) Script(script string) *Script {
+ s.script = script
+ return s
+}
+
+// Type sets the type of script: "inline", "id", or "file".
+func (s *Script) Type(typ string) *Script {
+ s.typ = typ
+ return s
+}
+
+// Lang sets the language of the script. Permitted values are "groovy",
+// "expression", "mustache", "mvel" (default), "javascript", "python".
+// To use certain languages, you need to configure your server and/or
+// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html
+// for details.
+func (s *Script) Lang(lang string) *Script { + s.lang = lang + return s +} + +// Param adds a key/value pair to the parameters that this script will be executed with. +func (s *Script) Param(name string, value interface{}) *Script { + if s.params == nil { + s.params = make(map[string]interface{}) + } + s.params[name] = value + return s +} + +// Params sets the map of parameters this script will be executed with. +func (s *Script) Params(params map[string]interface{}) *Script { + s.params = params + return s +} + +// Source returns the JSON serializable data for this Script. +func (s *Script) Source() (interface{}, error) { + if s.typ == "" && s.lang == "" && len(s.params) == 0 { + return s.script, nil + } + source := make(map[string]interface{}) + if s.typ == "" { + source["inline"] = s.script + } else { + source[s.typ] = s.script + } + if s.lang != "" { + source["lang"] = s.lang + } + if len(s.params) > 0 { + source["params"] = s.params + } + return source, nil +} + +// -- Script Field -- + +// ScriptField is a single script field. +type ScriptField struct { + FieldName string // name of the field + + script *Script +} + +// NewScriptField creates and initializes a new ScriptField. +func NewScriptField(fieldName string, script *Script) *ScriptField { + return &ScriptField{FieldName: fieldName, script: script} +} + +// Source returns the serializable JSON for the ScriptField. +func (f *ScriptField) Source() (interface{}, error) { + if f.script == nil { + return nil, errors.New("ScriptField expects script") + } + source := make(map[string]interface{}) + src, err := f.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/script_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/script_test.go new file mode 100644 index 000000000..552d92a02 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/script_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
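+//
+// A usage sketch for the Script builder defined in script.go; the script
+// body and parameter name are illustrative:
+//
+//   s := NewScriptInline("doc['field'].value * factor").Param("factor", 2.0)
+//   src, err := s.Source()
+//   // src marshals to: {"inline":"doc['field'].value * factor","params":{"factor":2}}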
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestScriptingDefault(t *testing.T) { + builder := NewScript("doc['field'].value * 2") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `"doc['field'].value * 2"` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingInline(t *testing.T) { + builder := NewScriptInline("doc['field'].value * factor").Param("factor", 2.0) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"inline":"doc['field'].value * factor","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingId(t *testing.T) { + builder := NewScriptId("script-with-id").Param("factor", 2.0) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"id":"script-with-id","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptingFile(t *testing.T) { + builder := NewScriptFile("script-file").Param("factor", 2.0).Lang("groovy") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"file":"script-file","lang":"groovy","params":{"factor":2}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scroll.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scroll.go new file mode 100644 index 000000000..1cab35c36 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scroll.go @@ -0,0 +1,208 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// ScrollService manages a cursor through documents in Elasticsearch. +type ScrollService struct { + client *Client + indices []string + types []string + keepAlive string + query Query + size *int + pretty bool + scrollId string +} + +func NewScrollService(client *Client) *ScrollService { + builder := &ScrollService{ + client: client, + query: NewMatchAllQuery(), + } + return builder +} + +func (s *ScrollService) Index(indices ...string) *ScrollService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +func (s *ScrollService) Type(types ...string) *ScrollService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Scroll is an alias for KeepAlive, the time to keep +// the cursor alive (e.g. "5m" for 5 minutes). +func (s *ScrollService) Scroll(keepAlive string) *ScrollService { + s.keepAlive = keepAlive + return s +} + +// KeepAlive sets the maximum time the cursor will be +// available before expiration (e.g. 
"5m" for 5 minutes). +func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService { + s.keepAlive = keepAlive + return s +} + +func (s *ScrollService) Query(query Query) *ScrollService { + s.query = query + return s +} + +func (s *ScrollService) Pretty(pretty bool) *ScrollService { + s.pretty = pretty + return s +} + +func (s *ScrollService) Size(size int) *ScrollService { + s.size = &size + return s +} + +func (s *ScrollService) ScrollId(scrollId string) *ScrollService { + s.scrollId = scrollId + return s +} + +func (s *ScrollService) Do() (*SearchResult, error) { + if s.scrollId == "" { + return s.GetFirstPage() + } + return s.GetNextPage() +} + +func (s *ScrollService) GetFirstPage() (*SearchResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + if len(indexPart) > 0 { + path += strings.Join(indexPart, ",") + } + + // Types + typesPart := make([]string, 0) + for _, typ := range s.types { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": typ, + }) + if err != nil { + return nil, err + } + typesPart = append(typesPart, typ) + } + if len(typesPart) > 0 { + path += "/" + strings.Join(typesPart, ",") + } + + // Search + path += "/_search" + + // Parameters + params := make(url.Values) + // TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated. + params.Set("search_type", "scan") + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.keepAlive != "" { + params.Set("scroll", s.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + if s.size != nil && *s.size > 0 { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + + // Set body + body := make(map[string]interface{}) + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + body["query"] = src + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + searchResult := new(SearchResult) + if err := json.Unmarshal(res.Body, searchResult); err != nil { + return nil, err + } + + return searchResult, nil +} + +func (s *ScrollService) GetNextPage() (*SearchResult, error) { + if s.scrollId == "" { + return nil, EOS + } + + // Build url + path := "/_search/scroll" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.keepAlive != "" { + params.Set("scroll", s.keepAlive) + } else { + params.Set("scroll", defaultKeepAlive) + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, s.scrollId) + if err != nil { + return nil, err + } + + // Return result + searchResult := new(SearchResult) + if err := json.Unmarshal(res.Body, searchResult); err != nil { + return nil, err + } + + // Determine last page + if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 { + return nil, EOS + } + + return searchResult, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scroll_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scroll_test.go new file mode 100644 index 000000000..4a5c48111 
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/scroll_test.go
@@ -0,0 +1,106 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+func TestScroll(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	res, err := client.Scroll(testIndexName).Size(1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if res.Hits == nil {
+		t.Errorf("expected results.Hits != nil; got nil")
+	}
+	if res.Hits.TotalHits != 3 {
+		t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, res.Hits.TotalHits)
+	}
+	if len(res.Hits.Hits) != 0 {
+		t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(res.Hits.Hits))
+	}
+	if res.ScrollId == "" {
+		t.Errorf("expected scrollId in results; got %q", res.ScrollId)
+	}
+
+	pages := 0
+	numDocs := 0
+	scrollId := res.ScrollId
+
+	for {
+		searchResult, err := client.Scroll(testIndexName).
+			Size(1).
+			ScrollId(scrollId).
+			Do()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pages += 1
+
+		for _, hit := range searchResult.Hits.Hits {
+			if hit.Index != testIndexName {
+				t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+			}
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+			numDocs += 1
+		}
+
+		scrollId = searchResult.ScrollId
+		if scrollId == "" {
+			t.Errorf("expected scrollId in results; got %q", scrollId)
+		}
+	}
+
+	if pages <= 0 {
+		t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+	}
+
+	if numDocs != 3 {
+		t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search.go
new file mode 100644
index 000000000..4811ee1ed
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search.go
@@ -0,0 +1,429 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// Search for documents in Elasticsearch.
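+//
+// A typical call chain looks roughly like this (editor's sketch; the
+// index name "tweets" and the term query values are illustrative):
+//
+//	res, err := client.Search().
+//		Index("tweets").
+//		Query(NewTermQuery("user", "olivere")).
+//		From(0).Size(10).
+//		Do()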
+type SearchService struct {
+	client       *Client
+	searchSource *SearchSource
+	source       interface{}
+	pretty       bool
+	searchType   string
+	indices      []string
+	routing      string
+	preference   string
+	types        []string
+}
+
+// NewSearchService creates a new service for searching in Elasticsearch.
+func NewSearchService(client *Client) *SearchService {
+	builder := &SearchService{
+		client:       client,
+		searchSource: NewSearchSource(),
+	}
+	return builder
+}
+
+// SearchSource sets the search source builder to use with this service.
+func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService {
+	s.searchSource = searchSource
+	if s.searchSource == nil {
+		s.searchSource = NewSearchSource()
+	}
+	return s
+}
+
+// Source allows the user to set the request body manually without using
+// any of the structs and interfaces in Elastic.
+func (s *SearchService) Source(source interface{}) *SearchService {
+	s.source = source
+	return s
+}
+
+// Index sets the names of the indices to use for search.
+func (s *SearchService) Index(indices ...string) *SearchService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Type restricts the search to a list of types.
+func (s *SearchService) Type(types ...string) *SearchService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Pretty enables the caller to indent the JSON output.
+func (s *SearchService) Pretty(pretty bool) *SearchService {
+	s.pretty = pretty
+	return s
+}
+
+// Timeout sets the timeout to use, e.g. "1s" or "1000ms".
+func (s *SearchService) Timeout(timeout string) *SearchService {
+	s.searchSource = s.searchSource.Timeout(timeout)
+	return s
+}
+
+// TimeoutInMillis sets the timeout in milliseconds.
+func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService {
+	s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis)
+	return s
+}
+
+// SearchType sets the search operation type. Valid values are:
+// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch",
+// "dfs_query_and_fetch", "count", "scan".
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-type.html
+// for details.
+func (s *SearchService) SearchType(searchType string) *SearchService {
+	s.searchType = searchType
+	return s
+}
+
+// Routing is a list of specific routing values to control the shards
+// the search will be executed on.
+func (s *SearchService) Routing(routings ...string) *SearchService {
+	s.routing = strings.Join(routings, ",")
+	return s
+}
+
+// Preference sets the preference to execute the search. Defaults to
+// randomizing across shards. Can be set to "_local" to prefer local shards,
+// "_primary" to execute on primary shards only, or a custom value which
+// guarantees that the same order will be used across different requests.
+func (s *SearchService) Preference(preference string) *SearchService {
+	s.preference = preference
+	return s
+}
+
+// Query sets the query to perform, e.g. MatchAllQuery.
+func (s *SearchService) Query(query Query) *SearchService {
+	s.searchSource = s.searchSource.Query(query)
+	return s
+}
+
+// PostFilter will be executed after the query has been executed and
+// only affects the search hits, not the aggregations.
+// This filter is always executed as the last filtering mechanism.
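+//
+// Sketch (field and value are illustrative): hits are narrowed to
+// color "red" after the query runs, while aggregations still see the
+// unfiltered result set:
+//
+//	svc := client.Search().Index("products").
+//		Query(NewMatchAllQuery()).
+//		PostFilter(NewTermQuery("color", "red"))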
+func (s *SearchService) PostFilter(postFilter Query) *SearchService {
+	s.searchSource = s.searchSource.PostFilter(postFilter)
+	return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchService) FetchSource(fetchSource bool) *SearchService {
+	s.searchSource = s.searchSource.FetchSource(fetchSource)
+	return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService {
+	s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
+	return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
+	s.searchSource = s.searchSource.Highlight(highlight)
+	return s
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
+	s.searchSource = s.searchSource.GlobalSuggestText(globalText)
+	return s
+}
+
+// Suggester adds a suggester to the search.
+func (s *SearchService) Suggester(suggester Suggester) *SearchService {
+	s.searchSource = s.searchSource.Suggester(suggester)
+	return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
+	s.searchSource = s.searchSource.Aggregation(name, aggregation)
+	return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchService) MinScore(minScore float64) *SearchService {
+	s.searchSource = s.searchSource.MinScore(minScore)
+	return s
+}
+
+// From index to start the search from. Defaults to 0.
+func (s *SearchService) From(from int) *SearchService {
+	s.searchSource = s.searchSource.From(from)
+	return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchService) Size(size int) *SearchService {
+	s.searchSource = s.searchSource.Size(size)
+	return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchService) Explain(explain bool) *SearchService {
+	s.searchSource = s.searchSource.Explain(explain)
+	return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchService) Version(version bool) *SearchService {
+	s.searchSource = s.searchSource.Version(version)
+	return s
+}
+
+// Sort adds a sort order.
+func (s *SearchService) Sort(field string, ascending bool) *SearchService {
+	s.searchSource = s.searchSource.Sort(field, ascending)
+	return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
+	s.searchSource = s.searchSource.SortWithInfo(info)
+	return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
+	s.searchSource = s.searchSource.SortBy(sorter...)
+	return s
+}
+
+// NoFields indicates that no fields should be loaded, resulting in only
+// id and type to be returned per hit.
+func (s *SearchService) NoFields() *SearchService {
+	s.searchSource = s.searchSource.NoFields()
+	return s
+}
+
+// Field adds a single field to load and return (note, must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
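+//
+// Sketch (assumes "user" is a stored field in the mapping):
+//
+//	res, err := client.Search().Index("tweets").Field("user").Do()
+//	if err == nil && res.Hits != nil {
+//		for _, hit := range res.Hits.Hits {
+//			_ = hit.Fields["user"] // value as returned by Elasticsearch
+//		}
+//	}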
+func (s *SearchService) Field(fieldName string) *SearchService {
+	s.searchSource = s.searchSource.Field(fieldName)
+	return s
+}
+
+// Fields sets the fields to load and return as part of the search request.
+// If none are specified, the source of the document will be returned.
+func (s *SearchService) Fields(fields ...string) *SearchService {
+	s.searchSource = s.searchSource.Fields(fields...)
+	return s
+}
+
+// Do executes the search and returns a SearchResult.
+func (s *SearchService) Do() (*SearchResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	path += strings.Join(indexPart, ",")
+
+	// Types part
+	if len(s.types) > 0 {
+		typesPart := make([]string, 0)
+		for _, typ := range s.types {
+			typ, err := uritemplates.Expand("{type}", map[string]string{
+				"type": typ,
+			})
+			if err != nil {
+				return nil, err
+			}
+			typesPart = append(typesPart, typ)
+		}
+		path += "/"
+		path += strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_search"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.searchType != "" {
+		params.Set("search_type", s.searchType)
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+
+	// Perform request
+	var body interface{}
+	if s.source != nil {
+		body = s.source
+	} else {
+		src, err := s.searchSource.Source()
+		if err != nil {
+			return nil, err
+		}
+		body = src
+	}
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return search results
+	ret := new(SearchResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// SearchResult is the result of a search in Elasticsearch.
+type SearchResult struct {
+	TookInMillis int64         `json:"took"`         // search time in milliseconds
+	ScrollId     string        `json:"_scroll_id"`   // only used with Scroll and Scan operations
+	Hits         *SearchHits   `json:"hits"`         // the actual search hits
+	Suggest      SearchSuggest `json:"suggest"`      // results from suggesters
+	Aggregations Aggregations  `json:"aggregations"` // results from aggregations
+	TimedOut     bool          `json:"timed_out"`    // true if the search timed out
+	//Error string `json:"error,omitempty"` // used in MultiSearch only
+	// TODO double-check that MultiGet now returns detailed error information
+	Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
+}
+
+// TotalHits is a convenience function to return the number of hits for
+// a search result.
+func (r *SearchResult) TotalHits() int64 {
+	if r.Hits != nil {
+		return r.Hits.TotalHits
+	}
+	return 0
+}
+
+// Each is a utility function to iterate over all hits. It saves you from
+// checking for nil values. Notice that Each will ignore errors when
+// deserializing JSON.
+func (r *SearchResult) Each(typ reflect.Type) []interface{} {
+	if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
+		return nil
+	}
+	slice := make([]interface{}, 0)
+	for _, hit := range r.Hits.Hits {
+		v := reflect.New(typ).Elem()
+		if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil {
+			slice = append(slice, v.Interface())
+		}
+	}
+	return slice
+}
+
+// SearchHits specifies the list of search hits.
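+//
+// Decoding hits by hand mirrors what Each does (editor's sketch; tweet
+// is the fixture type used in this package's tests):
+//
+//	for _, hit := range res.Hits.Hits {
+//		var tw tweet
+//		if err := json.Unmarshal(*hit.Source, &tw); err == nil {
+//			fmt.Println(tw.User)
+//		}
+//	}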
+type SearchHits struct { + TotalHits int64 `json:"total"` // total number of hits found + MaxScore *float64 `json:"max_score"` // maximum score of all hits + Hits []*SearchHit `json:"hits"` // the actual hits returned +} + +// SearchHit is a single hit. +type SearchHit struct { + Score *float64 `json:"_score"` // computed score + Index string `json:"_index"` // index name + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // external or internal + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Timestamp int64 `json:"_timestamp"` // timestamp meta field + TTL int64 `json:"_ttl"` // ttl meta field + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Sort []interface{} `json:"sort"` // sort information + Highlight SearchHitHighlight `json:"highlight"` // highlighter information + Source *json.RawMessage `json:"_source"` // stored document source + Fields map[string]interface{} `json:"fields"` // returned fields + Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed + MatchedQueries []string `json:"matched_queries"` // matched queries + InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0 + + // Shard + // HighlightFields + // SortValues + // MatchedFilters +} + +type SearchHitInnerHits struct { + Hits *SearchHits `json:"hits"` +} + +// SearchExplanation explains how the score for a hit was computed. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html. +type SearchExplanation struct { + Value float64 `json:"value"` // e.g. 1.0 + Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:" + Details []SearchExplanation `json:"details,omitempty"` // recursive details +} + +// Suggest + +// SearchSuggest is a map of suggestions. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggest map[string][]SearchSuggestion + +// SearchSuggestion is a single search suggestion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []SearchSuggestionOption `json:"options"` +} + +// SearchSuggestionOption is an option of a SearchSuggestion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. +type SearchSuggestionOption struct { + Text string `json:"text"` + Score float64 `json:"score"` + Freq int `json:"freq"` + Payload interface{} `json:"payload"` +} + +// Aggregations (see search_aggs.go) + +// Highlighting + +// SearchHitHighlight is the highlight information of a search hit. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html +// for a general discussion of highlighting. +type SearchHitHighlight map[string][]string diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs.go new file mode 100644 index 000000000..8e13a539a --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs.go @@ -0,0 +1,1270 @@ +// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+)
+
+// Aggregations can be seen as a unit-of-work that builds
+// analytic information over a set of documents. It is
+// (in many senses) the follow-up to facets in Elasticsearch.
+// For more details about aggregations, visit:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
+type Aggregation interface {
+	// Source returns a JSON-serializable aggregation that is a fragment
+	// of the request sent to Elasticsearch.
+	Source() (interface{}, error)
+}
+
+// Aggregations is a list of aggregations that are part of a search result.
+type Aggregations map[string]*json.RawMessage
+
+// Min returns min aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html
+func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Max returns max aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
+func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Sum returns sum aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
+func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Avg returns average aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
+func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// ValueCount returns value-count aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
+func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Cardinality returns cardinality aggregation results.
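+// A usage sketch (the aggregation name "unique_users" is illustrative
+// and must match the name registered on the search request):
+//
+//	if agg, found := res.Aggregations.Cardinality("unique_users"); found && agg.Value != nil {
+//		fmt.Printf("distinct users: %.0f\n", *agg.Value)
+//	}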
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html +func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Stats returns stats aggregation results. +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html +func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ExtendedStats returns extended stats aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html +func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationExtendedStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Percentiles returns percentiles results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html +func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// PercentileRanks returns percentile ranks results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html +func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// TopHits returns top-hits aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html +func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationTopHitsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Global returns global results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html +func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filter returns filter results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html +func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filters returns filters results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html +func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketFilters) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Missing returns missing results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html +func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Nested returns nested results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html +func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ReverseNested returns reverse-nested results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html +func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Children returns children results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html +func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Terms returns terms aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html +func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SignificantTerms returns significant terms aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html +func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketSignificantTerms) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sampler returns sampler aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-sampler-aggregation.html +func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Range returns range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html +func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// KeyedRange returns keyed range aggregation results. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html. +func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyedRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateRange returns date range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html +func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// IPv4Range returns IPv4 range aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html +func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Histogram returns histogram aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html +func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateHistogram returns date histogram aggregation results. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html +func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoBounds returns geo-bounds aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html +func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationGeoBoundsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoHash returns geo-hash aggregation results. +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html +func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoDistance returns geo distance aggregation results. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html +func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// AvgBucket returns average bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html +func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SumBucket returns sum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html +func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MaxBucket returns maximum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html +func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MinBucket returns minimum bucket pipeline aggregation results. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html
+func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPipelineBucketMetricValue)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// MovAvg returns moving average pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html
+func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPipelineSimpleValue)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Derivative returns derivative pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html
+func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPipelineDerivative)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// CumulativeSum returns cumulative sum pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html
+func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPipelineSimpleValue)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// BucketScript returns bucket script pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html
+func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPipelineSimpleValue)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// SerialDiff returns serial differencing pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html
+func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPipelineSimpleValue)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// -- Single value metric --
+
+// AggregationValueMetric is a single-value metric, returned e.g. by a
+// Min or Max aggregation.
+type AggregationValueMetric struct {
+	Aggregations
+
+	Value *float64               //`json:"value"`
+	Meta  map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure.
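+// The same decoding pattern recurs for every aggregation type in this
+// file (editor's note): unmarshal into a map of raw messages, pull out
+// the well-known keys, and retain the whole map as Aggregations so that
+// nested sub-aggregations stay reachable by name.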
+func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Stats metric -- + +// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation. +type AggregationStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure. +func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Extended stats metric -- + +// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation. +type AggregationExtendedStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + SumOfSquares *float64 //`json:"sum_of_squares,omitempty"` + Variance *float64 //`json:"variance,omitempty"` + StdDeviation *float64 //`json:"std_deviation,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure. +func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["sum_of_squares"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfSquares) + } + if v, ok := aggs["variance"]; ok && v != nil { + json.Unmarshal(*v, &a.Variance) + } + if v, ok := aggs["std_deviation"]; ok && v != nil { + json.Unmarshal(*v, &a.StdDeviation) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Percentiles metric -- + +// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation. 
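+//
+// Reading it back (editor's sketch; "load_time" is an illustrative
+// aggregation name, and Values maps percentile keys such as "95.0" to
+// the computed values):
+//
+//	if agg, found := res.Aggregations.Percentiles("load_time"); found {
+//		for pct, value := range agg.Values {
+//			fmt.Println(pct, value)
+//		}
+//	}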
+type AggregationPercentilesMetric struct { + Aggregations + + Values map[string]float64 // `json:"values"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure. +func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["values"]; ok && v != nil { + json.Unmarshal(*v, &a.Values) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Top-hits metric -- + +// AggregationTopHitsMetric is a metric returned by a TopHits aggregation. +type AggregationTopHitsMetric struct { + Aggregations + + Hits *SearchHits //`json:"hits"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure. +func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + a.Aggregations = aggs + a.Hits = new(SearchHits) + if v, ok := aggs["hits"]; ok && v != nil { + json.Unmarshal(*v, &a.Hits) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + return nil +} + +// -- Geo-bounds metric -- + +// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation. +type AggregationGeoBoundsMetric struct { + Aggregations + + Bounds struct { + TopLeft struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"top_left"` + BottomRight struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"bottom_right"` + } `json:"bounds"` + + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure. +func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["bounds"]; ok && v != nil { + json.Unmarshal(*v, &a.Bounds) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Single bucket -- + +// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global. +type AggregationSingleBucket struct { + Aggregations + + DocCount int64 // `json:"doc_count"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure. +func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket range items -- + +// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned +// with a range aggregation. 
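+//
+// Sketch (assuming a range aggregation registered as "price_ranges"):
+//
+//	if agg, found := res.Aggregations.Range("price_ranges"); found {
+//		for _, bucket := range agg.Buckets {
+//			fmt.Println(bucket.Key, bucket.DocCount)
+//		}
+//	}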
+type AggregationBucketRangeItems struct {
+	Aggregations
+
+	DocCountErrorUpperBound int64                         //`json:"doc_count_error_upper_bound"`
+	SumOfOtherDocCount      int64                         //`json:"sum_other_doc_count"`
+	Buckets                 []*AggregationBucketRangeItem //`json:"buckets"`
+	Meta                    map[string]interface{}        // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.
+func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+		json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+	}
+	if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+		json.Unmarshal(*v, &a.SumOfOtherDocCount)
+	}
+	if v, ok := aggs["buckets"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Buckets)
+	}
+	if v, ok := aggs["meta"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Meta)
+	}
+	a.Aggregations = aggs
+	return nil
+}
+
+// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned
+// with a keyed range aggregation.
+type AggregationBucketKeyedRangeItems struct {
+	Aggregations
+
+	DocCountErrorUpperBound int64                                  //`json:"doc_count_error_upper_bound"`
+	SumOfOtherDocCount      int64                                  //`json:"sum_other_doc_count"`
+	Buckets                 map[string]*AggregationBucketRangeItem //`json:"buckets"`
+	Meta                    map[string]interface{}                 // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedRangeItems structure.
+func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+		json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+	}
+	if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+		json.Unmarshal(*v, &a.SumOfOtherDocCount)
+	}
+	if v, ok := aggs["buckets"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Buckets)
+	}
+	if v, ok := aggs["meta"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Meta)
+	}
+	a.Aggregations = aggs
+	return nil
+}
+
+// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure.
+type AggregationBucketRangeItem struct {
+	Aggregations
+
+	Key          string   //`json:"key"`
+	DocCount     int64    //`json:"doc_count"`
+	From         *float64 //`json:"from"`
+	FromAsString string   //`json:"from_as_string"`
+	To           *float64 //`json:"to"`
+	ToAsString   string   //`json:"to_as_string"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure.
+func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["key"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Key)
+	}
+	if v, ok := aggs["doc_count"]; ok && v != nil {
+		json.Unmarshal(*v, &a.DocCount)
+	}
+	if v, ok := aggs["from"]; ok && v != nil {
+		json.Unmarshal(*v, &a.From)
+	}
+	if v, ok := aggs["from_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.FromAsString)
+	}
+	if v, ok := aggs["to"]; ok && v != nil {
+		json.Unmarshal(*v, &a.To)
+	}
+	if v, ok := aggs["to_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.ToAsString)
+	}
+	a.Aggregations = aggs
+	return nil
+}
+
+// -- Bucket key items --
+
+// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned
+// with a terms aggregation.
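+//
+// Sketch (assuming a terms aggregation registered as "by_user"; Key is
+// an interface{}, with KeyNumber holding the numeric form where
+// applicable):
+//
+//	if agg, found := res.Aggregations.Terms("by_user"); found {
+//		for _, bucket := range agg.Buckets {
+//			fmt.Println(bucket.Key, bucket.DocCount)
+//		}
+//	}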
+type AggregationBucketKeyItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure. +func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure. +type AggregationBucketKeyItem struct { + Aggregations + + Key interface{} //`json:"key"` + KeyNumber json.Number + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure. +func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + json.Unmarshal(*v, &a.KeyNumber) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket types for significant terms -- + +// AggregationBucketSignificantTerms is a bucket aggregation returned +// with a significant terms aggregation. +type AggregationBucketSignificantTerms struct { + Aggregations + + DocCount int64 //`json:"doc_count"` + Buckets []*AggregationBucketSignificantTerm //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure. +func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure. +type AggregationBucketSignificantTerm struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + BgCount int64 //`json:"bg_count"` + Score float64 //`json:"score"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure. 
+func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["bg_count"]; ok && v != nil { + json.Unmarshal(*v, &a.BgCount) + } + if v, ok := aggs["score"]; ok && v != nil { + json.Unmarshal(*v, &a.Score) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket filters -- + +// AggregationBucketFilters is a multi-bucket aggregation that is returned +// with a filters aggregation. +type AggregationBucketFilters struct { + Aggregations + + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure. +func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + json.Unmarshal(*v, &a.NamedBuckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket histogram items -- + +// AggregationBucketHistogramItems is a bucket aggregation that is returned +// with a date histogram aggregation. +type AggregationBucketHistogramItems struct { + Aggregations + + Buckets []*AggregationBucketHistogramItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure. +func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure. +type AggregationBucketHistogramItem struct { + Aggregations + + Key int64 //`json:"key"` + KeyAsString *string //`json:"key_as_string"` + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure. +func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["key_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.KeyAsString) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline simple value -- + +// AggregationPipelineSimpleValue is a simple value, returned e.g. by a +// MovAvg aggregation. 
+type AggregationPipelineSimpleValue struct {
+	Aggregations
+
+	Value         *float64               // `json:"value"`
+	ValueAsString string                 // `json:"value_as_string"`
+	Meta          map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure.
+func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["value"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Value)
+	}
+	if v, ok := aggs["value_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.ValueAsString)
+	}
+	if v, ok := aggs["meta"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Meta)
+	}
+	a.Aggregations = aggs
+	return nil
+}
+
+// -- Pipeline bucket metric --
+
+// AggregationPipelineBucketMetricValue is a value returned e.g. by a
+// MaxBucket aggregation.
+type AggregationPipelineBucketMetricValue struct {
+	Aggregations
+
+	Keys          []interface{}          // `json:"keys"`
+	Value         *float64               // `json:"value"`
+	ValueAsString string                 // `json:"value_as_string"`
+	Meta          map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure.
+func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["keys"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Keys)
+	}
+	if v, ok := aggs["value"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Value)
+	}
+	if v, ok := aggs["value_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.ValueAsString)
+	}
+	if v, ok := aggs["meta"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Meta)
+	}
+	a.Aggregations = aggs
+	return nil
+}
+
+// -- Pipeline derivative --
+
+// AggregationPipelineDerivative is the value returned by a
+// Derivative aggregation.
+type AggregationPipelineDerivative struct {
+	Aggregations
+
+	Value                   *float64               // `json:"value"`
+	ValueAsString           string                 // `json:"value_as_string"`
+	NormalizedValue         *float64               // `json:"normalized_value"`
+	NormalizedValueAsString string                 // `json:"normalized_value_as_string"`
+	Meta                    map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure.
+func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["value"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Value)
+	}
+	if v, ok := aggs["value_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.ValueAsString)
+	}
+	if v, ok := aggs["normalized_value"]; ok && v != nil {
+		json.Unmarshal(*v, &a.NormalizedValue)
+	}
+	if v, ok := aggs["normalized_value_as_string"]; ok && v != nil {
+		json.Unmarshal(*v, &a.NormalizedValueAsString)
+	}
+	if v, ok := aggs["meta"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Meta)
+	}
+	a.Aggregations = aggs
+	return nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go
new file mode 100644
index 000000000..903e5461f
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ChildrenAggregation is a special single bucket aggregation that enables +// aggregating from buckets on parent document types to buckets on child documents. +// It is available from 1.4.0.Beta1 upwards. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html +type ChildrenAggregation struct { + typ string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewChildrenAggregation() *ChildrenAggregation { + return &ChildrenAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation { + a.typ = typ + return a +} + +func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation { + a.meta = metaData + return a +} + +func (a *ChildrenAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "to-answers" : { + // "children": { + // "type" : "answer" + // } + // } + // } + // } + // This method returns only the { "type" : ... } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["children"] = opts + opts["type"] = a.typ + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children_test.go new file mode 100644 index 000000000..a305073f3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_children_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
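
The UnmarshalJSON implementations in search_aggs.go above all follow the same pattern: decode the body into a map of json.RawMessage, eagerly unmarshal the well-known keys, and keep everything else raw in the embedded Aggregations map so sub-aggregations can be decoded on demand. A minimal sketch of that round trip, decoding a terms-style payload with these exported types; the JSON body and the import alias are illustrative assumptions, not part of this patch:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	// Hypothetical terms-aggregation response body; the shape mirrors the
	// keys read by AggregationBucketKeyItems.UnmarshalJSON above.
	payload := []byte(`{
		"doc_count_error_upper_bound": 0,
		"sum_other_doc_count": 3,
		"buckets": [
			{"key": "golang", "doc_count": 12},
			{"key": "clojure", "doc_count": 7}
		]
	}`)

	var terms elastic.AggregationBucketKeyItems
	if err := json.Unmarshal(payload, &terms); err != nil {
		panic(err)
	}
	for _, b := range terms.Buckets {
		fmt.Printf("%v: %d docs\n", b.Key, b.DocCount)
	}
}
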
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestChildrenAggregation(t *testing.T) { + agg := NewChildrenAggregation().Type("answer") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"children":{"type":"answer"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestChildrenAggregationWithSubAggregation(t *testing.T) { + subAgg := NewTermsAggregation().Field("owner.display_name").Size(10) + agg := NewChildrenAggregation().Type("answer") + agg = agg.SubAggregation("top-names", subAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"top-names":{"terms":{"field":"owner.display_name","size":10}}},"children":{"type":"answer"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go new file mode 100644 index 000000000..231c51ef8 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram.go @@ -0,0 +1,285 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DateHistogramAggregation is a multi-bucket aggregation similar to the +// histogram except it can only be applied on date values. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html +type DateHistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval string + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin interface{} + extendedBoundsMax interface{} + timeZone string + format string + offset string +} + +// NewDateHistogramAggregation creates a new DateHistogramAggregation. +func NewDateHistogramAggregation() *DateHistogramAggregation { + return &DateHistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +// Field on which the aggregation is processed. +func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation { + a.field = field + return a +} + +func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation { + a.missing = missing + return a +} + +func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation { + a.meta = metaData + return a +} + +// Interval by which the aggregation gets processed. 
+// Allowed values are: "year", "quarter", "month", "week", "day", +// "hour", "minute". It also supports time settings like "1.5h" +// (up to "w" for weeks). +func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. +func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation { + return a.OrderByCount(true) +} + +func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation { + return a.OrderByCount(false) +} + +func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation { + // "order" : { "_key" : "asc" } + a.order = "_key" + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation { + return a.OrderByKey(true) +} + +func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation { + return a.OrderByKey(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. +func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +// MinDocCount sets the minimum document count per bucket. +// Buckets with less documents than this min value will not be returned. +func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation { + a.minDocCount = &minDocCount + return a +} + +// TimeZone sets the timezone in which to translate dates before computing buckets. +func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation { + a.timeZone = timeZone + return a +} + +// Format sets the format to use for dates. +func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation { + a.format = format + return a +} + +// Offset sets the offset of time intervals in the histogram, e.g. "+6h". +func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation { + a.offset = offset + return a +} + +// ExtendedBounds accepts int, int64, string, or time.Time values. 
+// In case the lower value in the histogram would be greater than min or the +// upper value would be less than max, empty buckets will be generated. +func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + a.extendedBoundsMax = max + return a +} + +// ExtendedBoundsMin accepts int, int64, string, or time.Time values. +func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + return a +} + +// ExtendedBoundsMax accepts int, int64, string, or time.Time values. +func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation { + a.extendedBoundsMax = max + return a +} + +func (a *DateHistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "articles_over_time" : { + // "date_histogram" : { + // "field" : "date", + // "interval" : "month" + // } + // } + // } + // } + // + // This method returns only the { "date_histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.timeZone != "" { + opts["time_zone"] = a.timeZone + } + if a.offset != "" { + opts["offset"] = a.offset + } + if a.format != "" { + opts["format"] = a.format + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram_test.go new file mode 100644 index 000000000..3c826ce9e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_histogram_test.go @@ -0,0 +1,49 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDateHistogramAggregation(t *testing.T) { + agg := NewDateHistogramAggregation(). + Field("date"). + Interval("month"). + Format("YYYY-MM"). + TimeZone("UTC"). 
+		Offset("+6h")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month","offset":"+6h","time_zone":"UTC"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestDateHistogramAggregationWithMissing(t *testing.T) {
+	agg := NewDateHistogramAggregation().Field("date").Interval("year").Missing("1900")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_histogram":{"field":"date","interval":"year","missing":"1900"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go
new file mode 100644
index 000000000..82de0696b
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range.go
@@ -0,0 +1,234 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"time"
+)
+
+// DateRangeAggregation is a range aggregation that is dedicated for
+// date values. The main difference between this aggregation and the
+// normal range aggregation is that the from and to values can be expressed
+// in Date Math expressions, and it is also possible to specify a
+// date format by which the from and to response fields will be returned.
+// Note that this aggregation includes the from value and excludes the to
+// value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+type DateRangeAggregation struct {
+	field           string
+	script          *Script
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	keyed           *bool
+	unmapped        *bool
+	format          string
+	entries         []DateRangeAggregationEntry
+}
+
+type DateRangeAggregationEntry struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+func NewDateRangeAggregation() *DateRangeAggregation {
+	return &DateRangeAggregation{
+		subAggregations: make(map[string]Aggregation),
+		entries:         make([]DateRangeAggregationEntry, 0),
+	}
+}
+
+func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation {
+	a.field = field
+	return a
+}
+
+func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation {
+	a.script = script
+	return a
+}
+
+func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation {
+	a.keyed = &keyed
+	return a
+}
+
+func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation {
+	a.unmapped = &unmapped
+	return a
+}
+
+func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation {
+	a.format = format
+	return a
+}
+
+func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) LtWithKey(key string, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *DateRangeAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "range" : {
+	//             "date_range": {
+	//                 "field": "date",
+	//                 "format": "MM-yyy",
+	//                 "ranges": [
+	//                     { "to": "now-10M/M" },
+	//                     { "from": "now-10M/M" }
+	//                 ]
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "date_range" : { ... } } part.
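+	//
+	// On the response side, the plain form is decoded into
+	// AggregationBucketRangeItems and the keyed form (see Keyed above)
+	// into AggregationBucketKeyedRangeItems, both defined in
+	// search_aggs.go.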
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + if a.format != "" { + opts["format"] = a.format + } + + ranges := make([]interface{}, 0) + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range_test.go new file mode 100644 index 000000000..42c525121 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_date_range_test.go @@ -0,0 +1,130 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDateRangeAggregation(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at") + agg = agg.AddRange(nil, "2012-12-31") + agg = agg.AddRange("2013-01-01", "2013-12-31") + agg = agg.AddRange("2014-01-01", nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithUnbounded(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + AddUnboundedFrom("2012-12-31"). + AddRange("2013-01-01", "2013-12-31"). 
+ AddUnboundedTo("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithLtAndCo(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + Lt("2012-12-31"). + Between("2013-01-01", "2013-12-31"). + Gt("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithKeyedFlag(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + Keyed(true). + Lt("2012-12-31"). + Between("2013-01-01", "2013-12-31"). + Gt("2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithKeys(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + Keyed(true). + LtWithKey("pre-2012", "2012-12-31"). + BetweenWithKey("2013", "2013-01-01", "2013-12-31"). + GtWithKey("post-2013", "2014-01-01") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"key":"pre-2012","to":"2012-12-31"},{"from":"2013-01-01","key":"2013","to":"2013-12-31"},{"from":"2014-01-01","key":"post-2013"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestDateRangeAggregationWithSpecialNames(t *testing.T) { + agg := NewDateRangeAggregation().Field("created_at"). + AddRange("now-10M/M", "now+10M/M") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"date_range":{"field":"created_at","ranges":[{"from":"now-10M/M","to":"now+10M/M"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go new file mode 100644 index 000000000..101399882 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter.go @@ -0,0 +1,77 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
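
The string-typed ranges exercised by the tests above are only one option: as the type switch in DateRangeAggregation.Source() shows, time.Time endpoints are serialized as RFC3339 strings. A short sketch under the assumption that the package is imported as elastic; the field name is invented:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	cutoff := time.Date(2014, time.January, 1, 0, 0, 0, 0, time.UTC)

	agg := elastic.NewDateRangeAggregation().
		Field("created_at").
		AddUnboundedFrom(cutoff). // bucket for everything before the cutoff
		AddUnboundedTo(cutoff)    // bucket for everything from the cutoff on

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"date_range":{"field":"created_at","ranges":[{"to":"2014-01-01T00:00:00Z"},{"from":"2014-01-01T00:00:00Z"}]}}
}
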
+ +package elastic + +// FilterAggregation defines a single bucket of all the documents +// in the current document set context that match a specified filter. +// Often this will be used to narrow down the current aggregation context +// to a specific set of documents. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html +type FilterAggregation struct { + filter Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewFilterAggregation() *FilterAggregation { + return &FilterAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation { + a.meta = metaData + return a +} + +func (a *FilterAggregation) Filter(filter Query) *FilterAggregation { + a.filter = filter + return a +} + +func (a *FilterAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "in_stock_products" : { + // "filter" : { "range" : { "stock" : { "gt" : 0 } } } + // } + // } + // } + // This method returns only the { "filter" : {} } part. + + src, err := a.filter.Source() + if err != nil { + return nil, err + } + source := make(map[string]interface{}) + source["filter"] = src + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter_test.go new file mode 100644 index 000000000..5c6262a26 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filter_test.go @@ -0,0 +1,66 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFilterAggregation(t *testing.T) { + filter := NewRangeQuery("stock").Gt(0) + agg := NewFilterAggregation().Filter(filter) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFilterAggregationWithSubAggregation(t *testing.T) { + avgPriceAgg := NewAvgAggregation().Field("price") + filter := NewRangeQuery("stock").Gt(0) + agg := NewFilterAggregation().Filter(filter). 
+		SubAggregation("avg_price", avgPriceAgg)
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFilterAggregationWithMeta(t *testing.T) {
+	filter := NewRangeQuery("stock").Gt(0)
+	agg := NewFilterAggregation().Filter(filter).Meta(map[string]interface{}{"name": "Oliver"})
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"meta":{"name":"Oliver"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go
new file mode 100644
index 000000000..6dda39c61
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters.go
@@ -0,0 +1,96 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FiltersAggregation defines a multi-bucket aggregation where each bucket
+// is associated with a filter. Each bucket will collect all documents that
+// match its associated filter.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+type FiltersAggregation struct {
+	filters         []Query
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+}
+
+func NewFiltersAggregation() *FiltersAggregation {
+	return &FiltersAggregation{
+		filters:         make([]Query, 0),
+		subAggregations: make(map[string]Aggregation),
+	}
+}
+
+func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation {
+	a.filters = append(a.filters, filter)
+	return a
+}
+
+func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation {
+	if len(filters) > 0 {
+		a.filters = append(a.filters, filters...)
+	}
+	return a
+}
+
+func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *FiltersAggregation) Source() (interface{}, error) {
+	// Example (anonymous form, as built by this type):
+	// {
+	//   "aggs" : {
+	//     "messages" : {
+	//       "filters" : {
+	//         "filters" : [
+	//           { "term" : { "body" : "error" }},
+	//           { "term" : { "body" : "warning" }}
+	//         ]
+	//       }
+	//     }
+	//   }
+	// }
+	// This method returns only the (outer) { "filters" : {} } part.
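+	//
+	// With the anonymous form above, the response buckets come back as a
+	// JSON array, which AggregationBucketFilters (see search_aggs.go)
+	// decodes into its Buckets slice; a keyed response would land in
+	// NamedBuckets instead.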
+ + source := make(map[string]interface{}) + filters := make(map[string]interface{}) + source["filters"] = filters + + arr := make([]interface{}, len(a.filters)) + for i, filter := range a.filters { + src, err := filter.Source() + if err != nil { + return nil, err + } + arr[i] = src + } + filters["filters"] = arr + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters_test.go new file mode 100644 index 000000000..4977d5162 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_filters_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFiltersAggregation(t *testing.T) { + f1 := NewRangeQuery("stock").Gt(0) + f2 := NewTermQuery("symbol", "GOOG") + agg := NewFiltersAggregation().Filters(f1, f2) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFiltersAggregationWithSubAggregation(t *testing.T) { + avgPriceAgg := NewAvgAggregation().Field("price") + f1 := NewRangeQuery("stock").Gt(0) + f2 := NewTermQuery("symbol", "GOOG") + agg := NewFiltersAggregation().Filters(f1, f2).SubAggregation("avg_price", avgPriceAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFiltersAggregationWithMetaData(t *testing.T) { + f1 := NewRangeQuery("stock").Gt(0) + f2 := NewTermQuery("symbol", "GOOG") + agg := NewFiltersAggregation().Filters(f1, f2).Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go 
b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go
new file mode 100644
index 000000000..3a1372221
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance.go
@@ -0,0 +1,194 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
+// and conceptually works very similarly to the range aggregation.
+// The user can define a point of origin and a set of distance range buckets.
+// The aggregation evaluates the distance of each document value from
+// the origin point and determines the bucket it belongs to based on
+// the ranges (a document belongs to a bucket if the distance between the
+// document and the origin falls within the distance range of the bucket).
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html
+type GeoDistanceAggregation struct {
+	field           string
+	unit            string
+	distanceType    string
+	point           string
+	ranges          []geoDistAggRange
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+}
+
+type geoDistAggRange struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+func NewGeoDistanceAggregation() *GeoDistanceAggregation {
+	return &GeoDistanceAggregation{
+		subAggregations: make(map[string]Aggregation),
+		ranges:          make([]geoDistAggRange, 0),
+	}
+}
+
+func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation {
+	a.field = field
+	return a
+}
+
+func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation {
+	a.unit = unit
+	return a
+}
+
+func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation {
+	a.distanceType = distanceType
+	return a
+}
+
+func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation {
+	a.point = latLon
+	return a
+}
+
+func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+	return a
+}
+
+func (a *GeoDistanceAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//    "aggs" : {
+	//        "rings_around_amsterdam" : {
+	//            "geo_distance" : {
+	//                "field" : "location",
+	//                "origin" : "52.3760, 4.894",
+	//                "ranges" : [
+	//                    { "to" : 100 },
+	//                    { "from" : 100, "to" : 300 },
+	//                    { "from" : 300 }
+	//                ]
+	//            }
+	//        }
+	//    }
+	// }
+	//
+	// This method returns only the { "geo_distance" : { ... } } part.
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_distance"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.unit != "" { + opts["unit"] = a.unit + } + if a.distanceType != "" { + opts["distance_type"] = a.distanceType + } + if a.point != "" { + opts["origin"] = a.point + } + + ranges := make([]interface{}, 0) + for _, ent := range a.ranges { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case *int, *int16, *int32, *int64, *float32, *float64: + r["from"] = from + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case *int, *int16, *int32, *int64, *float32, *float64: + r["to"] = to + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance_test.go new file mode 100644 index 000000000..4cb0cd9f8 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_geo_distance_test.go @@ -0,0 +1,71 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
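
A keyed variant of the rings example, sketched under the same import-alias assumption as above; field, origin, and keys are invented, and the serialized source can be inspected without a running cluster:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	agg := elastic.NewGeoDistanceAggregation().
		Field("location").
		Point("52.3760, 4.894"). // "lat, lon" of the origin
		Unit("km").
		AddRangeWithKey("nearby", nil, 100).
		AddRangeWithKey("far", 100, nil)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"key":"nearby","to":100},{"from":100,"key":"far"}],"unit":"km"}}
}
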
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoDistanceAggregation(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddRange(nil, 100) + agg = agg.AddRange(100, 300) + agg = agg.AddRange(300, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceAggregationWithUnbounded(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddUnboundedFrom(100) + agg = agg.AddRange(100, 300) + agg = agg.AddUnboundedTo(300) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoDistanceAggregationWithMetaData(t *testing.T) { + agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") + agg = agg.AddRange(nil, 100) + agg = agg.AddRange(100, 300) + agg = agg.AddRange(300, nil) + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go new file mode 100644 index 000000000..49e24d60f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global.go @@ -0,0 +1,71 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GlobalAggregation defines a single bucket of all the documents within +// the search execution context. This context is defined by the indices +// and the document types you’re searching on, but is not influenced +// by the search query itself. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html +type GlobalAggregation struct { + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGlobalAggregation() *GlobalAggregation { + return &GlobalAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation { + a.meta = metaData + return a +} + +func (a *GlobalAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "all_products" : { + // "global" : {}, + // "aggs" : { + // "avg_price" : { "avg" : { "field" : "price" } } + // } + // } + // } + // } + // This method returns only the { "global" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["global"] = opts + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global_test.go new file mode 100644 index 000000000..8b55010c7 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_global_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGlobalAggregation(t *testing.T) { + agg := NewGlobalAggregation() + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"global":{}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGlobalAggregationWithMetaData(t *testing.T) { + agg := NewGlobalAggregation().Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"global":{},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go new file mode 100644 index 000000000..7821adbc0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram.go @@ -0,0 +1,253 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HistogramAggregation is a multi-bucket values source based aggregation +// that can be applied on numeric values extracted from the documents. +// It dynamically builds fixed size (a.k.a. interval) buckets over the +// values. 
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html +type HistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval int64 + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin *int64 + extendedBoundsMax *int64 + offset *int64 +} + +func NewHistogramAggregation() *HistogramAggregation { + return &HistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *HistogramAggregation) Field(field string) *HistogramAggregation { + a.field = field + return a +} + +func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation { + a.missing = missing + return a +} + +func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation { + a.meta = metaData + return a +} + +func (a *HistogramAggregation) Interval(interval int64) *HistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. +func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation { + return a.OrderByCount(true) +} + +func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation { + return a.OrderByCount(false) +} + +func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation { + // "order" : { "_key" : "asc" } + a.order = "_key" + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation { + return a.OrderByKey(true) +} + +func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation { + return a.OrderByKey(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. 
+func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *HistogramAggregation) ExtendedBounds(min, max int64) *HistogramAggregation { + a.extendedBoundsMin = &min + a.extendedBoundsMax = &max + return a +} + +func (a *HistogramAggregation) ExtendedBoundsMin(min int64) *HistogramAggregation { + a.extendedBoundsMin = &min + return a +} + +func (a *HistogramAggregation) ExtendedBoundsMax(max int64) *HistogramAggregation { + a.extendedBoundsMax = &max + return a +} + +func (a *HistogramAggregation) Offset(offset int64) *HistogramAggregation { + a.offset = &offset + return a +} + +func (a *HistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "prices" : { + // "histogram" : { + // "field" : "price", + // "interval" : 50 + // } + // } + // } + // } + // + // This method returns only the { "histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.offset != nil { + opts["offset"] = *a.offset + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram_test.go new file mode 100644 index 000000000..6a5d5fb92 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_histogram_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestHistogramAggregation(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Interval(50) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHistogramAggregationWithMetaData(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Offset(10).Interval(50).Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50,"offset":10},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestHistogramAggregationWithMissing(t *testing.T) { + agg := NewHistogramAggregation().Field("price").Interval(50).Missing("n/a") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"histogram":{"field":"price","interval":50,"missing":"n/a"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go new file mode 100644 index 000000000..ca610c953 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing.go @@ -0,0 +1,81 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MissingAggregation is a field data based single bucket aggregation, +// that creates a bucket of all documents in the current document set context +// that are missing a field value (effectively, missing a field or having +// the configured NULL value set). This aggregator will often be used in +// conjunction with other field data bucket aggregators (such as ranges) +// to return information for all the documents that could not be placed +// in any of the other buckets due to missing field data values. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html +type MissingAggregation struct { + field string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMissingAggregation() *MissingAggregation { + return &MissingAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MissingAggregation) Field(field string) *MissingAggregation { + a.field = field + return a +} + +func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
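+// For example (the metadata contents are illustrative):
+//
+//     agg := NewMissingAggregation().Field("price").
+//         Meta(map[string]interface{}{"name": "Oliver"})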
+func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation { + a.meta = metaData + return a +} + +func (a *MissingAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "products_without_a_price" : { + // "missing" : { "field" : "price" } + // } + // } + // } + // This method returns only the { "missing" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["missing"] = opts + + if a.field != "" { + opts["field"] = a.field + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing_test.go new file mode 100644 index 000000000..b52a96511 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_missing_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMissingAggregation(t *testing.T) { + agg := NewMissingAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"missing":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMissingAggregationWithMetaData(t *testing.T) { + agg := NewMissingAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"missing":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go new file mode 100644 index 000000000..f65da8048 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested.go @@ -0,0 +1,82 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedAggregation is a special single bucket aggregation that enables +// aggregating nested documents. 
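+//
+// A minimal usage sketch (the "resellers" path and the sub-aggregation
+// are illustrative):
+//
+//     agg := NewNestedAggregation().Path("resellers").
+//         SubAggregation("min_price", NewMinAggregation().Field("resellers.price"))
+//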
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-nested-aggregation.html +type NestedAggregation struct { + path string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewNestedAggregation() *NestedAggregation { + return &NestedAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation { + a.meta = metaData + return a +} + +func (a *NestedAggregation) Path(path string) *NestedAggregation { + a.path = path + return a +} + +func (a *NestedAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "name" : "led tv" } + // } + // "aggs" : { + // "resellers" : { + // "nested" : { + // "path" : "resellers" + // }, + // "aggs" : { + // "min_price" : { "min" : { "field" : "resellers.price" } } + // } + // } + // } + // } + // This method returns only the { "nested" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["nested"] = opts + + opts["path"] = a.path + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested_test.go new file mode 100644 index 000000000..c55612f07 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_nested_test.go @@ -0,0 +1,62 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestNestedAggregation(t *testing.T) {
+ agg := NewNestedAggregation().Path("resellers")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"path":"resellers"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedAggregationWithSubAggregation(t *testing.T) {
+ minPriceAgg := NewMinAggregation().Field("resellers.price")
+ agg := NewNestedAggregation().Path("resellers").SubAggregation("min_price", minPriceAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"min_price":{"min":{"field":"resellers.price"}}},"nested":{"path":"resellers"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedAggregationWithMetaData(t *testing.T) {
+ agg := NewNestedAggregation().Path("resellers").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"nested":{"path":"resellers"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go
new file mode 100644
index 000000000..bc017c60f
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range.go
@@ -0,0 +1,232 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// RangeAggregation is a multi-bucket value source based aggregation that
+// enables the user to define a set of ranges - each representing a bucket.
+// During the aggregation process, the values extracted from each document
+// will be checked against each bucket range, and matching documents will
+// be placed in the corresponding bucket. Note that this aggregation
+// includes the from value and excludes the to value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+type RangeAggregation struct {
+ field string
+ script *Script
+ missing interface{}
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ keyed *bool
+ unmapped *bool
+ entries []rangeAggregationEntry
+}
+
+type rangeAggregationEntry struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewRangeAggregation() *RangeAggregation {
+ return &RangeAggregation{
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]rangeAggregationEntry, 0),
+ }
+}
+
+func (a *RangeAggregation) Field(field string) *RangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a *RangeAggregation) Script(script *Script) *RangeAggregation {
+ a.script = script
+ return a
+}
+
+// Missing configures the value to use when documents miss a value.
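+// For example (the "price" field and the 0 placeholder are illustrative):
+//
+//     agg := NewRangeAggregation().Field("price").Missing(0)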
+func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation { + a.missing = missing + return a +} + +func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation { + a.meta = metaData + return a +} + +func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation { + a.keyed = &keyed + return a +} + +func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation { + a.unmapped = &unmapped + return a +} + +func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) BetweenWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "price_ranges" : { + // "range" : { + // "field" : "price", + // "ranges" : [ + // { "to" : 50 }, + // { "from" : 50, "to" : 100 }, + // { "from" : 100 } + // ] + // } + // } + // } + // } + // + // This method returns only the { "range" : { ... } } part. 
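+ //
+ // The ranges in the example above can be built with (values illustrative):
+ //
+ //     agg := NewRangeAggregation().Field("price").
+ //         AddRange(nil, 50).AddRange(50, 100).AddRange(100, nil)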
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + + ranges := make([]interface{}, 0) + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range_test.go new file mode 100644 index 000000000..f0fd5f5fd --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_range_test.go @@ -0,0 +1,156 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestRangeAggregation(t *testing.T) { + agg := NewRangeAggregation().Field("price") + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithUnbounded(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + AddUnboundedFrom(50). + AddRange(20, 70). + AddRange(70, 120). + AddUnboundedTo(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithLtAndCo(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Lt(50). + Between(20, 70). + Between(70, 120). 
+ Gt(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithKeyedFlag(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Keyed(true). + Lt(50). + Between(20, 70). + Between(70, 120). + Gt(150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithKeys(t *testing.T) { + agg := NewRangeAggregation().Field("field_name"). + Keyed(true). + LtWithKey("cheap", 50). + BetweenWithKey("affordable", 20, 70). + BetweenWithKey("average", 70, 120). + GtWithKey("expensive", 150) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"key":"cheap","to":50},{"from":20,"key":"affordable","to":70},{"from":70,"key":"average","to":120},{"from":150,"key":"expensive"}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithMetaData(t *testing.T) { + agg := NewRangeAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeAggregationWithMissing(t *testing.T) { + agg := NewRangeAggregation().Field("price").Missing(0) + agg = agg.AddRange(nil, 50) + agg = agg.AddRange(50, 100) + agg = agg.AddRange(100, nil) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"field":"price","missing":0,"ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go new file mode 100644 index 000000000..9a6df15ec --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler.go @@ -0,0 +1,145 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// SamplerAggregation is a filtering aggregation used to limit any +// sub aggregations' processing to a sample of the top-scoring documents. +// Optionally, diversity settings can be used to limit the number of matches +// that share a common value such as an "author". +// See: https://www.elastic.co/guide/en/elasticsearch/reference/2.x/search-aggregations-bucket-sampler-aggregation.html +type SamplerAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + shardSize int + maxDocsPerValue int + executionHint string +} + +func NewSamplerAggregation() *SamplerAggregation { + return &SamplerAggregation{ + shardSize: -1, + maxDocsPerValue: -1, + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SamplerAggregation) Field(field string) *SamplerAggregation { + a.field = field + return a +} + +func (a *SamplerAggregation) Script(script *Script) *SamplerAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *SamplerAggregation) Missing(missing interface{}) *SamplerAggregation { + a.missing = missing + return a +} + +func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation { + a.meta = metaData + return a +} + +// ShardSize sets the maximum number of docs returned from each shard. +func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation { + a.shardSize = shardSize + return a +} + +func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation { + a.maxDocsPerValue = maxDocsPerValue + return a +} + +func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation { + a.executionHint = hint + return a +} + +func (a *SamplerAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "sample" : { + // "sampler" : { + // "field" : "user.id", + // "shard_size" : 200 + // }, + // "aggs": { + // "keywords": { + // "significant_terms": { + // "field": "text" + // } + // } + // } + // } + // } + // } + // + // This method returns only the { "sampler" : { ... } } part. 
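+ //
+ // The request above can be built with (field and names illustrative):
+ //
+ //     agg := NewSamplerAggregation().Field("user.id").ShardSize(200).
+ //         SubAggregation("keywords", NewSignificantTermsAggregation().Field("text"))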
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sampler"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + if a.shardSize >= 0 { + opts["shard_size"] = a.shardSize + } + if a.maxDocsPerValue >= 0 { + opts["max_docs_per_value"] = a.maxDocsPerValue + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler_test.go new file mode 100644 index 000000000..da4ca5534 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_sampler_test.go @@ -0,0 +1,52 @@ +// Copyright 2012-2016 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSamplerAggregation(t *testing.T) { + keywordsAgg := NewSignificantTermsAggregation().Field("text") + agg := NewSamplerAggregation(). + Field("user.id"). + ShardSize(200). + SubAggregation("keywords", keywordsAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","shard_size":200}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSamplerAggregationWithMissing(t *testing.T) { + keywordsAgg := NewSignificantTermsAggregation().Field("text") + agg := NewSamplerAggregation(). + Field("user.id"). + Missing("n/a"). + SubAggregation("keywords", keywordsAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","missing":"n/a"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go new file mode 100644 index 000000000..1008887f0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms.go @@ -0,0 +1,141 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+// SignificantTermsAggregation is an aggregation that returns interesting
+// or unusual occurrences of terms in a set.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+type SignificantTermsAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+
+ minDocCount *int
+ shardMinDocCount *int
+ requiredSize *int
+ shardSize *int
+ filter Query
+ executionHint string
+}
+
+func NewSignificantTermsAggregation() *SignificantTermsAggregation {
+ return &SignificantTermsAggregation{
+ subAggregations: make(map[string]Aggregation, 0),
+ }
+}
+
+func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation {
+ a.field = field
+ return a
+}
+
+func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation {
+ a.shardMinDocCount = &shardMinDocCount
+ return a
+}
+
+func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation {
+ a.requiredSize = &requiredSize
+ return a
+}
+
+func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation {
+ a.shardSize = &shardSize
+ return a
+}
+
+func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation {
+ a.executionHint = hint
+ return a
+}
+
+func (a *SignificantTermsAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "query" : {
+ // "terms" : {"force" : [ "British Transport Police" ]}
+ // },
+ // "aggregations" : {
+ // "significantCrimeTypes" : {
+ // "significant_terms" : { "field" : "crime_type" }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the
+ // { "significant_terms" : { "field" : "crime_type" } }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["significant_terms"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.requiredSize != nil {
+ opts["size"] = *a.requiredSize // not a typo!
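+ // (Not a typo: significant_terms expects the requested bucket count
+ // under the "size" key in the request body, so requiredSize is
+ // intentionally serialized as "size".)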
+ } + if a.shardSize != nil { + opts["shard_size"] = *a.shardSize + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.filter != nil { + src, err := a.filter.Source() + if err != nil { + return nil, err + } + opts["background_filter"] = src + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms_test.go new file mode 100644 index 000000000..d24f3c9d1 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_significant_terms_test.go @@ -0,0 +1,86 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSignificantTermsAggregation(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"field":"crime_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithArgs(t *testing.T) { + agg := NewSignificantTermsAggregation(). + Field("crime_type"). + ExecutionHint("map"). + ShardSize(5). + MinDocCount(10). 
+ BackgroundFilter(NewTermQuery("city", "London")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"significant_terms":{"background_filter":{"term":{"city":"London"}},"execution_hint":"map","field":"crime_type","min_doc_count":10,"shard_size":5}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationSubAggregation(t *testing.T) { + crimeTypesAgg := NewSignificantTermsAggregation().Field("crime_type") + agg := NewTermsAggregation().Field("force") + agg = agg.SubAggregation("significantCrimeTypes", crimeTypesAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"significantCrimeTypes":{"significant_terms":{"field":"crime_type"}}},"terms":{"field":"force"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSignificantTermsAggregationWithMetaData(t *testing.T) { + agg := NewSignificantTermsAggregation().Field("crime_type") + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"significant_terms":{"field":"crime_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go new file mode 100644 index 000000000..2d3c0d1ad --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms.go @@ -0,0 +1,341 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsAggregation is a multi-bucket value source based aggregation +// where buckets are dynamically built - one per unique value. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html +type TermsAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + size *int + shardSize *int + requiredSize *int + minDocCount *int + shardMinDocCount *int + valueType string + order string + orderAsc bool + includePattern string + includeFlags *int + excludePattern string + excludeFlags *int + executionHint string + collectionMode string + showTermDocCountError *bool + includeTerms []string + excludeTerms []string +} + +func NewTermsAggregation() *TermsAggregation { + return &TermsAggregation{ + subAggregations: make(map[string]Aggregation, 0), + includeTerms: make([]string, 0), + excludeTerms: make([]string, 0), + } +} + +func (a *TermsAggregation) Field(field string) *TermsAggregation { + a.field = field + return a +} + +func (a *TermsAggregation) Script(script *Script) *TermsAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. 
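+// For example (the "gender" field and "n/a" placeholder are illustrative):
+//
+//     agg := NewTermsAggregation().Field("gender").Missing("n/a")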
+func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation { + a.missing = missing + return a +} + +func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation { + a.meta = metaData + return a +} + +func (a *TermsAggregation) Size(size int) *TermsAggregation { + a.size = &size + return a +} + +func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation { + a.requiredSize = &requiredSize + return a +} + +func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation { + a.shardSize = &shardSize + return a +} + +func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation { + a.shardMinDocCount = &shardMinDocCount + return a +} + +func (a *TermsAggregation) Include(regexp string) *TermsAggregation { + a.includePattern = regexp + return a +} + +func (a *TermsAggregation) IncludeWithFlags(regexp string, flags int) *TermsAggregation { + a.includePattern = regexp + a.includeFlags = &flags + return a +} + +func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation { + a.excludePattern = regexp + return a +} + +func (a *TermsAggregation) ExcludeWithFlags(regexp string, flags int) *TermsAggregation { + a.excludePattern = regexp + a.excludeFlags = &flags + return a +} + +// ValueType can be string, long, or double. +func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation { + a.valueType = valueType + return a +} + +func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation { + return a.OrderByCount(true) +} + +func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation { + return a.OrderByCount(false) +} + +func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation { + // "order" : { "_term" : "asc" } + a.order = "_term" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation { + return a.OrderByTerm(true) +} + +func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation { + return a.OrderByTerm(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. 
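+// For example, to sort term buckets by the "avg" value of a stats
+// sub-aggregation named "height_stats" (both names are illustrative):
+//
+//     agg = agg.OrderByAggregationAndMetric("height_stats", "avg", false)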
+func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation { + a.executionHint = hint + return a +} + +// Collection mode can be depth_first or breadth_first as of 1.4.0. +func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation { + a.collectionMode = collectionMode + return a +} + +func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation { + a.showTermDocCountError = &showTermDocCountError + return a +} + +func (a *TermsAggregation) IncludeTerms(terms ...string) *TermsAggregation { + a.includeTerms = append(a.includeTerms, terms...) + return a +} + +func (a *TermsAggregation) ExcludeTerms(terms ...string) *TermsAggregation { + a.excludeTerms = append(a.excludeTerms, terms...) + return a +} + +func (a *TermsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "genders" : { + // "terms" : { "field" : "gender" } + // } + // } + // } + // This method returns only the { "terms" : { "field" : "gender" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["terms"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + // TermsBuilder + if a.size != nil && *a.size >= 0 { + opts["size"] = *a.size + } + if a.shardSize != nil && *a.shardSize >= 0 { + opts["shard_size"] = *a.shardSize + } + if a.requiredSize != nil && *a.requiredSize >= 0 { + opts["required_size"] = *a.requiredSize + } + if a.minDocCount != nil && *a.minDocCount >= 0 { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.showTermDocCountError != nil { + opts["show_term_doc_count_error"] = *a.showTermDocCountError + } + if a.collectionMode != "" { + opts["collect_mode"] = a.collectionMode + } + if a.valueType != "" { + opts["value_type"] = a.valueType + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if len(a.includeTerms) > 0 { + opts["include"] = a.includeTerms + } + if a.includePattern != "" { + if a.includeFlags == nil || *a.includeFlags == 0 { + opts["include"] = a.includePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.includePattern + p["flags"] = *a.includeFlags + opts["include"] = p + } + } + if len(a.excludeTerms) > 0 { + opts["exclude"] = a.excludeTerms + } + if a.excludePattern != "" { + if a.excludeFlags == nil || *a.excludeFlags == 0 { + opts["exclude"] = a.excludePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.excludePattern + p["flags"] = *a.excludeFlags + opts["exclude"] = p + } + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := 
make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms_test.go new file mode 100644 index 000000000..e5f979333 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_bucket_terms_test.go @@ -0,0 +1,104 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermsAggregation(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc() + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithSubAggregation(t *testing.T) { + subAgg := NewAvgAggregation().Field("height") + agg := NewTermsAggregation().Field("gender").Size(10). + OrderByAggregation("avg_height", false) + agg = agg.SubAggregation("avg_height", subAgg) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMultipleSubAggregation(t *testing.T) { + subAgg1 := NewAvgAggregation().Field("height") + subAgg2 := NewAvgAggregation().Field("width") + agg := NewTermsAggregation().Field("gender").Size(10). 
+ OrderByAggregation("avg_height", false) + agg = agg.SubAggregation("avg_height", subAgg1) + agg = agg.SubAggregation("avg_width", subAgg2) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}},"avg_width":{"avg":{"field":"width"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMetaData(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc() + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermsAggregationWithMissing(t *testing.T) { + agg := NewTermsAggregation().Field("gender").Size(10).Missing("n/a") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms":{"field":"gender","missing":"n/a","size":10}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go new file mode 100644 index 000000000..37ec2b7ad --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg.go @@ -0,0 +1,101 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgAggregation is a single-value metrics aggregation that computes +// the average of numeric values that are extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html +type AvgAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewAvgAggregation() *AvgAggregation { + return &AvgAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *AvgAggregation) Field(field string) *AvgAggregation { + a.field = field + return a +} + +func (a *AvgAggregation) Script(script *Script) *AvgAggregation { + a.script = script + return a +} + +func (a *AvgAggregation) Format(format string) *AvgAggregation { + a.format = format + return a +} + +func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation { + a.meta = metaData + return a +} + +func (a *AvgAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "avg_grade" : { "avg" : { "field" : "grade" } } + // } + // } + // This method returns only the { "avg" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["avg"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg_test.go new file mode 100644 index 000000000..c8539d12d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_avg_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestAvgAggregation(t *testing.T) { + agg := NewAvgAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestAvgAggregationWithFormat(t *testing.T) { + agg := NewAvgAggregation().Field("grade").Format("000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade","format":"000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestAvgAggregationWithMetaData(t *testing.T) { + agg := NewAvgAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg":{"field":"grade"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go new file mode 100644 index 000000000..ebf247c79 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality.go @@ -0,0 +1,120 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CardinalityAggregation is a single-value metrics aggregation that +// calculates an approximate count of distinct values. +// Values can be extracted either from specific fields in the document +// or generated by a script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html +type CardinalityAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + precisionThreshold *int64 + rehash *bool +} + +func NewCardinalityAggregation() *CardinalityAggregation { + return &CardinalityAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation { + a.field = field + return a +} + +func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation { + a.script = script + return a +} + +func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation { + a.format = format + return a +} + +func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation { + a.meta = metaData + return a +} + +func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation { + a.precisionThreshold = &threshold + return a +} + +func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation { + a.rehash = &rehash + return a +} + +func (a *CardinalityAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "author_count" : { + // "cardinality" : { "field" : "author" } + // } + // } + // } + // This method returns only the "cardinality" : { "field" : "author" } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["cardinality"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + if a.precisionThreshold != nil { + opts["precision_threshold"] = *a.precisionThreshold + } + if a.rehash != nil { + opts["rehash"] = *a.rehash + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality_test.go new file mode 100644 index 000000000..bccfa7aae --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_cardinality_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestCardinalityAggregation(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestCardinalityAggregationWithOptions(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100).Rehash(true)
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash","precision_threshold":100,"rehash":true}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestCardinalityAggregationWithFormat(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash").Format("00000")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash","format":"00000"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestCardinalityAggregationWithMetaData(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash").Meta(map[string]interface{}{"name": "Oliver"})
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash"},"meta":{"name":"Oliver"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go
new file mode 100644
index 000000000..69447409c
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExtendedStatsAggregation is a multi-value metrics aggregation that
+// computes stats over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html +type ExtendedStatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewExtendedStatsAggregation() *ExtendedStatsAggregation { + return &ExtendedStatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation { + a.field = field + return a +} + +func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation { + a.script = script + return a +} + +func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation { + a.format = format + return a +} + +func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation { + a.meta = metaData + return a +} + +func (a *ExtendedStatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "extended_stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "extended_stats" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["extended_stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats_test.go new file mode 100644 index 000000000..4a80693cf --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_extended_stats_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
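// A usage sketch, separate from the vendored diff: extended_stats returns
// count, min, max, avg, sum, variance and standard deviation in a single
// pass. The "exams" index and "grade" field are illustrative assumptions.

package example

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func PrintGradeStats(client *elastic.Client) error {
	res, err := client.Search().Index("exams").Size(0).
		Aggregation("grades", elastic.NewExtendedStatsAggregation().Field("grade")).
		Do()
	if err != nil {
		return err
	}
	stats, found := res.Aggregations.ExtendedStats("grades")
	if !found {
		return fmt.Errorf("grades aggregation missing from response")
	}
	// Pointer fields are nil when no matching values were aggregated.
	if stats.Avg != nil && stats.StdDeviation != nil {
		fmt.Printf("count=%d avg=%.2f stddev=%.2f\n", stats.Count, *stats.Avg, *stats.StdDeviation)
	}
	return nil
}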
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestExtendedStatsAggregation(t *testing.T) {
+	agg := NewExtendedStatsAggregation().Field("grade")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"extended_stats":{"field":"grade"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestExtendedStatsAggregationWithFormat(t *testing.T) {
+	agg := NewExtendedStatsAggregation().Field("grade").Format("000.0")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"extended_stats":{"field":"grade","format":"000.0"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go
new file mode 100644
index 000000000..647ba5139
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds.go
@@ -0,0 +1,105 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoBoundsAggregation is a metric aggregation that computes the
+// bounding box containing all geo_point values for a field.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
+type GeoBoundsAggregation struct {
+	field           string
+	script          *Script
+	wrapLongitude   *bool
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+}
+
+func NewGeoBoundsAggregation() *GeoBoundsAggregation {
+	return &GeoBoundsAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+}
+
+func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation {
+	a.field = field
+	return a
+}
+
+func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation {
+	a.script = script
+	return a
+}
+
+func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation {
+	a.wrapLongitude = &wrapLongitude
+	return a
+}
+
+func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *GeoBoundsAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "query" : {
+	//         "match" : { "business_type" : "shop" }
+	//     },
+	//     "aggs" : {
+	//         "viewport" : {
+	//             "geo_bounds" : {
+	//                 "field" : "location",
+	//                 "wrap_longitude" : true
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "geo_bounds" : { ... } } part.
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_bounds"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.wrapLongitude != nil { + opts["wrap_longitude"] = *a.wrapLongitude + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds_test.go new file mode 100644 index 000000000..3096b8ee5 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_geo_bounds_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoBoundsAggregation(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundsAggregationWithWrapLongitude(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location").WrapLongitude(true) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location","wrap_longitude":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundsAggregationWithMetaData(t *testing.T) { + agg := NewGeoBoundsAggregation().Field("location").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bounds":{"field":"location"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go new file mode 100644 index 000000000..334cff020 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
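// A usage sketch, separate from the vendored diff: the geo_bounds
// aggregation above yields a top-left/bottom-right viewport around all
// matching geo_points. The "shops" index and geo_point "location" field are
// illustrative assumptions.

package example

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func ShopViewport(client *elastic.Client) error {
	agg := elastic.NewGeoBoundsAggregation().Field("location").WrapLongitude(true)
	res, err := client.Search().Index("shops").Size(0).
		Aggregation("viewport", agg).
		Do()
	if err != nil {
		return err
	}
	bounds, found := res.Aggregations.GeoBounds("viewport")
	if !found {
		return fmt.Errorf("viewport aggregation missing from response")
	}
	fmt.Printf("top-left: %f,%f bottom-right: %f,%f\n",
		bounds.Bounds.TopLeft.Latitude, bounds.Bounds.TopLeft.Longitude,
		bounds.Bounds.BottomRight.Latitude, bounds.Bounds.BottomRight.Longitude)
	return nil
}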
+ +package elastic + +// MaxAggregation is a single-value metrics aggregation that keeps track and +// returns the maximum value among the numeric values extracted from +// the aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html +type MaxAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMaxAggregation() *MaxAggregation { + return &MaxAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MaxAggregation) Field(field string) *MaxAggregation { + a.field = field + return a +} + +func (a *MaxAggregation) Script(script *Script) *MaxAggregation { + a.script = script + return a +} + +func (a *MaxAggregation) Format(format string) *MaxAggregation { + a.format = format + return a +} + +func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation { + a.meta = metaData + return a +} +func (a *MaxAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "max_price" : { "max" : { "field" : "price" } } + // } + // } + // This method returns only the { "max" : { "field" : "price" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["max"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max_test.go new file mode 100644 index 000000000..b5da00c19 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_max_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
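// A usage sketch, separate from the vendored diff: max (and min, which is
// defined a little further down in this same patch) is most useful as a
// sub-aggregation, e.g. a per-month price range under a date histogram. The
// "sales" index and the "date"/"price" fields are illustrative assumptions.

package example

import elastic "gopkg.in/olivere/elastic.v3"

func MonthlyPriceRange(client *elastic.Client) (*elastic.SearchResult, error) {
	byMonth := elastic.NewDateHistogramAggregation().
		Field("date").
		Interval("month").
		SubAggregation("min_price", elastic.NewMinAggregation().Field("price")).
		SubAggregation("max_price", elastic.NewMaxAggregation().Field("price"))
	// Buckets come back via res.Aggregations.DateHistogram("per_month");
	// each bucket then exposes bucket.Min("min_price") / bucket.Max("max_price").
	return client.Search().Index("sales").Size(0).
		Aggregation("per_month", byMonth).
		Do()
}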
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMaxAggregation(t *testing.T) { + agg := NewMaxAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMaxAggregationWithFormat(t *testing.T) { + agg := NewMaxAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMaxAggregationWithMetaData(t *testing.T) { + agg := NewMaxAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"max":{"field":"price"},"meta":{"name":"Oliver"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go new file mode 100644 index 000000000..f9e21f7a8 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min.go @@ -0,0 +1,100 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MinAggregation is a single-value metrics aggregation that keeps track and +// returns the minimum value among numeric values extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by a +// provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html +type MinAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMinAggregation() *MinAggregation { + return &MinAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MinAggregation) Field(field string) *MinAggregation { + a.field = field + return a +} + +func (a *MinAggregation) Script(script *Script) *MinAggregation { + a.script = script + return a +} + +func (a *MinAggregation) Format(format string) *MinAggregation { + a.format = format + return a +} + +func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation { + a.meta = metaData + return a +} + +func (a *MinAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "min_price" : { "min" : { "field" : "price" } } + // } + // } + // This method returns only the { "min" : { "field" : "price" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["min"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min_test.go new file mode 100644 index 000000000..170650667 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_min_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMinAggregation(t *testing.T) { + agg := NewMinAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMinAggregationWithFormat(t *testing.T) { + agg := NewMinAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMinAggregationWithMetaData(t *testing.T) { + agg := NewMinAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"min":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go new file mode 100644 index 000000000..c0b3aa663 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentileRanksAggregation
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html
+type PercentileRanksAggregation struct {
+	field           string
+	script          *Script
+	format          string
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	values          []float64
+	compression     *float64
+	estimator       string
+}
+
+func NewPercentileRanksAggregation() *PercentileRanksAggregation {
+	return &PercentileRanksAggregation{
+		subAggregations: make(map[string]Aggregation),
+		values:          make([]float64, 0),
+	}
+}
+
+func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation {
+	a.field = field
+	return a
+}
+
+func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation {
+	a.script = script
+	return a
+}
+
+func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation {
+	a.format = format
+	return a
+}
+
+func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation {
+	a.meta = metaData
+	return a
+}
+
+func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation {
+	a.values = append(a.values, values...)
+	return a
+}
+
+func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation {
+	a.compression = &compression
+	return a
+}
+
+func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation {
+	a.estimator = estimator
+	return a
+}
+
+func (a *PercentileRanksAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "load_time_outlier" : {
+	//             "percentile_ranks" : {
+	//                 "field" : "load_time",
+	//                 "values" : [15, 30]
+	//             }
+	//         }
+	//     }
+	// }
+	// This method returns only the
+	// { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } }
+	// part.
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentile_ranks"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.values) > 0 { + opts["values"] = a.values + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks_test.go new file mode 100644 index 000000000..df4b7c4a3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentile_ranks_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPercentileRanksAggregation(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithCustomValues(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Values(15, 30) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time","values":[15,30]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithFormat(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Format("000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentile_ranks":{"field":"load_time","format":"000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentileRanksAggregationWithMetaData(t *testing.T) { + agg := NewPercentileRanksAggregation().Field("load_time").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"percentile_ranks":{"field":"load_time"}}` + if got != expected { 
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go new file mode 100644 index 000000000..b1695ebb3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles.go @@ -0,0 +1,130 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PercentilesAggregation +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html +type PercentilesAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + percentiles []float64 + compression *float64 + estimator string +} + +func NewPercentilesAggregation() *PercentilesAggregation { + return &PercentilesAggregation{ + subAggregations: make(map[string]Aggregation), + percentiles: make([]float64, 0), + } +} + +func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation { + a.field = field + return a +} + +func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation { + a.script = script + return a +} + +func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation { + a.format = format + return a +} + +func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation { + a.meta = metaData + return a +} + +func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation { + a.percentiles = append(a.percentiles, percentiles...) + return a +} + +func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation { + a.compression = &compression + return a +} + +func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation { + a.estimator = estimator + return a +} + +func (a *PercentilesAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentiles" : { + // "field" : "load_time" + // } + // } + // } + // } + // This method returns only the + // { "percentiles" : { "field" : "load_time" } } + // part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentiles"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.percentiles) > 0 { + opts["percents"] = a.percentiles + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles_test.go new file mode 100644 index 000000000..da2d2055e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_percentiles_test.go @@ -0,0 +1,78 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPercentilesAggregation(t *testing.T) { + agg := NewPercentilesAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithCustomPercents(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Percentiles(0.2, 0.5, 0.9) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price","percents":[0.2,0.5,0.9]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithFormat(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"percentiles":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPercentilesAggregationWithMetaData(t *testing.T) { + agg := NewPercentilesAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"percentiles":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} 
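// A usage sketch, separate from the vendored diff: percentiles answers
// "what value sits at the 95th percentile?", while percentile_ranks answers
// the inverse, "at what percentile does a value of 500 sit?". The "logs"
// index, the "load_time" field and the "95.0"/"500.0" response-key format
// are illustrative assumptions.

package example

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func LoadTimePercentiles(client *elastic.Client) error {
	res, err := client.Search().Index("logs").Size(0).
		Aggregation("pcts", elastic.NewPercentilesAggregation().
			Field("load_time").Percentiles(50, 95, 99)).
		Aggregation("ranks", elastic.NewPercentileRanksAggregation().
			Field("load_time").Values(500, 1000)).
		Do()
	if err != nil {
		return err
	}
	if pcts, found := res.Aggregations.Percentiles("pcts"); found {
		fmt.Println("p95 load time:", pcts.Values["95.0"])
	}
	if ranks, found := res.Aggregations.PercentileRanks("ranks"); found {
		fmt.Println("percentile rank of 500:", ranks.Values["500.0"])
	}
	return nil
}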
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go new file mode 100644 index 000000000..42da9c854 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// StatsAggregation is a multi-value metrics aggregation that computes stats +// over numeric values extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html +type StatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewStatsAggregation() *StatsAggregation { + return &StatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *StatsAggregation) Field(field string) *StatsAggregation { + a.field = field + return a +} + +func (a *StatsAggregation) Script(script *Script) *StatsAggregation { + a.script = script + return a +} + +func (a *StatsAggregation) Format(format string) *StatsAggregation { + a.format = format + return a +} + +func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation { + a.meta = metaData + return a +} + +func (a *StatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "stats" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats_test.go new file mode 100644 index 000000000..0ea0b175d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_stats_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
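// A usage sketch, separate from the vendored diff: stats bundles count, min,
// max, avg and sum, so it can replace four separate metric aggregations in a
// single request. The "exams" index and "grade" field are illustrative
// assumptions.

package example

import elastic "gopkg.in/olivere/elastic.v3"

// GradeSummary returns count/min/max/avg/sum of "grade" in one request.
func GradeSummary(client *elastic.Client) (*elastic.AggregationStatsMetric, error) {
	res, err := client.Search().Index("exams").Size(0).
		Aggregation("grades_stats", elastic.NewStatsAggregation().Field("grade")).
		Do()
	if err != nil {
		return nil, err
	}
	stats, _ := res.Aggregations.Stats("grades_stats")
	return stats, nil
}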
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestStatsAggregation(t *testing.T) { + agg := NewStatsAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"stats":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestStatsAggregationWithFormat(t *testing.T) { + agg := NewStatsAggregation().Field("grade").Format("0000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"stats":{"field":"grade","format":"0000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestStatsAggregationWithMetaData(t *testing.T) { + agg := NewStatsAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"stats":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go new file mode 100644 index 000000000..6f783e7e1 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SumAggregation is a single-value metrics aggregation that sums up +// numeric values that are extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html +type SumAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewSumAggregation() *SumAggregation { + return &SumAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SumAggregation) Field(field string) *SumAggregation { + a.field = field + return a +} + +func (a *SumAggregation) Script(script *Script) *SumAggregation { + a.script = script + return a +} + +func (a *SumAggregation) Format(format string) *SumAggregation { + a.format = format + return a +} + +func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation { + a.meta = metaData + return a +} + +func (a *SumAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "intraday_return" : { "sum" : { "field" : "change" } } + // } + // } + // This method returns only the { "sum" : { "field" : "change" } } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sum"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum_test.go new file mode 100644 index 000000000..737808931 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_sum_test.go @@ -0,0 +1,61 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSumAggregation(t *testing.T) { + agg := NewSumAggregation().Field("price") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSumAggregationWithFormat(t *testing.T) { + agg := NewSumAggregation().Field("price").Format("00000.00") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum":{"field":"price","format":"00000.00"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSumAggregationWithMetaData(t *testing.T) { + agg := NewSumAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"sum":{"field":"price"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go new file mode 100644 index 000000000..c017abb98 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits.go @@ -0,0 +1,143 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TopHitsAggregation keeps track of the most relevant document +// being aggregated. This aggregator is intended to be used as a +// sub aggregator, so that the top matching documents +// can be aggregated per bucket. 
+//
+// It can effectively be used to group result sets by certain fields via
+// a bucket aggregator: one or more bucket aggregators determine the
+// properties by which the result set is sliced into buckets.
+//
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+type TopHitsAggregation struct {
+	searchSource *SearchSource
+}
+
+func NewTopHitsAggregation() *TopHitsAggregation {
+	return &TopHitsAggregation{
+		searchSource: NewSearchSource(),
+	}
+}
+
+func (a *TopHitsAggregation) From(from int) *TopHitsAggregation {
+	a.searchSource = a.searchSource.From(from)
+	return a
+}
+
+func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Size(size)
+	return a
+}
+
+func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.TrackScores(trackScores)
+	return a
+}
+
+func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Explain(explain)
+	return a
+}
+
+func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Version(version)
+	return a
+}
+
+func (a *TopHitsAggregation) NoFields() *TopHitsAggregation {
+	a.searchSource = a.searchSource.NoFields()
+	return a
+}
+
+func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FetchSource(fetchSource)
+	return a
+}
+
+func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
+	return a
+}
+
+func (a *TopHitsAggregation) FieldDataFields(fieldDataFields ...string) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FieldDataFields(fieldDataFields...)
+	return a
+}
+
+func (a *TopHitsAggregation) FieldDataField(fieldDataField string) *TopHitsAggregation {
+	a.searchSource = a.searchSource.FieldDataField(fieldDataField)
+	return a
+}
+
+func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation {
+	a.searchSource = a.searchSource.ScriptFields(scriptFields...)
+	return a
+}
+
+func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation {
+	a.searchSource = a.searchSource.ScriptField(scriptField)
+	return a
+}
+
+func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Sort(field, ascending)
+	return a
+}
+
+func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation {
+	a.searchSource = a.searchSource.SortWithInfo(info)
+	return a
+}
+
+func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation {
+	a.searchSource = a.searchSource.SortBy(sorter...)
+	return a
+}
+
+func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation {
+	a.searchSource = a.searchSource.Highlight(highlight)
+	return a
+}
+
+func (a *TopHitsAggregation) Highlighter() *Highlight {
+	return a.searchSource.Highlighter()
+}
+
+func (a *TopHitsAggregation) Source() (interface{}, error) {
+	// Example:
+	// {
+	//     "aggs": {
+	//         "top_tag_hits": {
+	//             "top_hits": {
+	//                 "sort": [
+	//                     {
+	//                         "last_activity_date": {
+	//                             "order": "desc"
+	//                         }
+	//                     }
+	//                 ],
+	//                 "_source": {
+	//                     "include": [
+	//                         "title"
+	//                     ]
+	//                 },
+	//                 "size" : 1
+	//             }
+	//         }
+	//     }
+	// }
+	// This method returns only the { "top_hits" : { ... } } part.
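// A usage sketch, separate from the vendored diff: top_hits is the standard
// way to fetch "the best document per bucket", here the latest question per
// tag, mirroring the example in the comment above. The "qa" index and the
// "tags"/"title"/"last_activity_date" fields are illustrative assumptions.

package example

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func LatestQuestionPerTag(client *elastic.Client) error {
	top := elastic.NewTopHitsAggregation().
		Sort("last_activity_date", false). // false = descending
		FetchSourceContext(elastic.NewFetchSourceContext(true).Include("title")).
		Size(1)
	byTag := elastic.NewTermsAggregation().Field("tags").
		SubAggregation("top_tag_hits", top)
	res, err := client.Search().Index("qa").Size(0).
		Aggregation("top_tags", byTag).
		Do()
	if err != nil {
		return err
	}
	tags, found := res.Aggregations.Terms("top_tags")
	if !found {
		return fmt.Errorf("top_tags aggregation missing from response")
	}
	for _, bucket := range tags.Buckets {
		if hits, ok := bucket.TopHits("top_tag_hits"); ok && hits.Hits != nil {
			fmt.Printf("%v: %d matching question(s)\n", bucket.Key, hits.Hits.TotalHits)
		}
	}
	return nil
}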
+ + source := make(map[string]interface{}) + src, err := a.searchSource.Source() + if err != nil { + return nil, err + } + source["top_hits"] = src + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits_test.go new file mode 100644 index 000000000..2634a22b6 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_top_hits_test.go @@ -0,0 +1,31 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTopHitsAggregation(t *testing.T) { + fsc := NewFetchSourceContext(true).Include("title") + agg := NewTopHitsAggregation(). + Sort("last_activity_date", false). + FetchSourceContext(fsc). + Size(1) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"top_hits":{"_source":{"excludes":[],"includes":["title"]},"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go new file mode 100644 index 000000000..b2e3e8241 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count.go @@ -0,0 +1,102 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ValueCountAggregation is a single-value metrics aggregation that counts +// the number of values that are extracted from the aggregated documents. +// These values can be extracted either from specific fields in the documents, +// or be generated by a provided script. Typically, this aggregator will be +// used in conjunction with other single-value aggregations. +// For example, when computing the avg one might be interested in the +// number of values the average is computed over. +// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html +type ValueCountAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewValueCountAggregation() *ValueCountAggregation { + return &ValueCountAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation { + a.field = field + return a +} + +func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation { + a.script = script + return a +} + +func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation { + a.format = format + return a +} + +func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
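// A usage sketch, separate from the vendored diff: pairing value_count with
// avg (as the comment above suggests) reports how many values the average
// was computed over. The "exams" index and "grade" field are illustrative
// assumptions.

package example

import elastic "gopkg.in/olivere/elastic.v3"

func AvgGradeWithCount(client *elastic.Client) (avg, count float64, err error) {
	res, err := client.Search().Index("exams").Size(0).
		Aggregation("avg_grade", elastic.NewAvgAggregation().Field("grade")).
		Aggregation("grades_count", elastic.NewValueCountAggregation().Field("grade")).
		Do()
	if err != nil {
		return 0, 0, err
	}
	if a, ok := res.Aggregations.Avg("avg_grade"); ok && a.Value != nil {
		avg = *a.Value
	}
	if c, ok := res.Aggregations.ValueCount("grades_count"); ok && c.Value != nil {
		count = *c.Value
	}
	return avg, count, nil
}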
+func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation { + a.meta = metaData + return a +} + +func (a *ValueCountAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_count" : { "value_count" : { "field" : "grade" } } + // } + // } + // This method returns only the { "value_count" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["value_count"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count_test.go new file mode 100644 index 000000000..eee189b51 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_metrics_value_count_test.go @@ -0,0 +1,63 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestValueCountAggregation(t *testing.T) { + agg := NewValueCountAggregation().Field("grade") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"value_count":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestValueCountAggregationWithFormat(t *testing.T) { + // Format comes with 1.5.0+ + agg := NewValueCountAggregation().Field("grade").Format("0000.0") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"value_count":{"field":"grade","format":"0000.0"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestValueCountAggregationWithMetaData(t *testing.T) { + agg := NewValueCountAggregation().Field("grade") + agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"meta":{"name":"Oliver"},"value_count":{"field":"grade"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go new file mode 100644 index 000000000..5cd93d5cc --- /dev/null +++ 
b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgBucketAggregation is a sibling pipeline aggregation which calculates +// the (mean) average value of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html +type AvgBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation. +func NewAvgBucketAggregation() *AvgBucketAggregation { + return &AvgBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *AvgBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *AvgBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["avg_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket_test.go new file mode 100644 index 000000000..0e6509ecb --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_avg_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestAvgBucketAggregation(t *testing.T) { + agg := NewAvgBucketAggregation().BucketsPath("the_sum").GapPolicy("skip") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"avg_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go new file mode 100644 index 000000000..44d6bc624 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script.go @@ -0,0 +1,132 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketScriptAggregation is a parent pipeline aggregation which executes +// a script which can perform per bucket computations on specified metrics +// in the parent multi-bucket aggregation. The specified metric must be +// numeric and the script must return a numeric value. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html +type BucketScriptAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation. 
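+//
+// A minimal usage sketch; the bucket names and script mirror the unit test
+// for this aggregation and are otherwise arbitrary:
+//
+//    agg := NewBucketScriptAggregation().
+//        AddBucketsPath("tShirtSales", "t-shirts>sales").
+//        AddBucketsPath("totalSales", "total_sales").
+//        Script(NewScript("tShirtSales / totalSales * 100"))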
+func NewBucketScriptAggregation() *BucketScriptAggregation { + return &BucketScriptAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *BucketScriptAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketScriptAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. +func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketScriptAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_script"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script_test.go new file mode 100644 index 000000000..7f4d966d0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_script_test.go @@ -0,0 +1,30 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBucketScriptAggregation(t *testing.T) { + agg := NewBucketScriptAggregation(). + AddBucketsPath("tShirtSales", "t-shirts>sales"). + AddBucketsPath("totalSales", "total_sales"). + Script(NewScript("tShirtSales / totalSales * 100")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts\u003esales","totalSales":"total_sales"},"script":"tShirtSales / totalSales * 100"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go new file mode 100644 index 000000000..ce17ec1f6 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector.go @@ -0,0 +1,134 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketSelectorAggregation is a parent pipeline aggregation which +// determines whether the current bucket will be retained in the parent +// multi-bucket aggregation. The specific metric must be numeric and +// the script must return a boolean value. If the script language is +// expression then a numeric return value is permitted. In this case 0.0 +// will be evaluated as false and all other values will evaluate to true. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-selector-aggregation.html +type BucketSelectorAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation. +func NewBucketSelectorAggregation() *BucketSelectorAggregation { + return &BucketSelectorAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. 
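+// For example (the name and metric are hypothetical; NewAvgAggregation is
+// used the same way in the moving-average tests in this package):
+//
+//    agg = agg.SubAggregation("avg_height", NewAvgAggregation().Field("height"))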
+func (a *BucketSelectorAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketSelectorAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketSelectorAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_selector"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector_test.go new file mode 100644 index 000000000..d4e0206de --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_bucket_selector_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBucketSelectorAggregation(t *testing.T) { + agg := NewBucketSelectorAggregation(). + AddBucketsPath("totalSales", "total_sales"). 
+ Script(NewScript("totalSales >= 1000")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":"totalSales \u003e= 1000"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go new file mode 100644 index 000000000..018eb918f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum.go @@ -0,0 +1,90 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CumulativeSumAggregation is a parent pipeline aggregation which calculates +// the cumulative sum of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html +type CumulativeSumAggregation struct { + format string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation. +func NewCumulativeSumAggregation() *CumulativeSumAggregation { + return &CumulativeSumAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation { + a.format = format + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *CumulativeSumAggregation) SubAggregation(name string, subAggregation Aggregation) *CumulativeSumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *CumulativeSumAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["cumulative_sum"] = params + + if a.format != "" { + params["format"] = a.format + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum_test.go new file mode 100644 index 000000000..a4023d84e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_cumulative_sum_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCumulativeSumAggregation(t *testing.T) { + agg := NewCumulativeSumAggregation().BucketsPath("sales") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"cumulative_sum":{"buckets_path":"sales"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go new file mode 100644 index 000000000..66611f46e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative.go @@ -0,0 +1,124 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DerivativeAggregation is a parent pipeline aggregation which calculates +// the derivative of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html +type DerivativeAggregation struct { + format string + gapPolicy string + unit string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewDerivativeAggregation creates and initializes a new DerivativeAggregation. 
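+//
+// A derivative is computed per bucket of an enclosing histogram. A minimal
+// sketch, with field and aggregation names assumed (they match the
+// integration test in this package):
+//
+//    h := NewDateHistogramAggregation().Field("time").Interval("month")
+//    h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
+//    h = h.SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales"))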
+func NewDerivativeAggregation() *DerivativeAggregation { + return &DerivativeAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation { + a.gapPolicy = "skip" + return a +} + +// Unit sets the unit provided, e.g. "1d" or "1y". +// It is only useful when calculating the derivative using a date_histogram. +func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation { + a.unit = unit + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *DerivativeAggregation) SubAggregation(name string, subAggregation Aggregation) *DerivativeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *DerivativeAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["derivative"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.unit != "" { + params["unit"] = a.unit + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative_test.go new file mode 100644 index 000000000..1d2ec2d38 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_derivative_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestDerivativeAggregation(t *testing.T) { + agg := NewDerivativeAggregation().BucketsPath("sales") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"derivative":{"buckets_path":"sales"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go new file mode 100644 index 000000000..da6f9ef36 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket.go @@ -0,0 +1,114 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MaxBucketAggregation is a sibling pipeline aggregation which identifies +// the bucket(s) with the maximum value of a specified metric in a sibling +// aggregation and outputs both the value and the key(s) of the bucket(s). +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html +type MaxBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation. +func NewMaxBucketAggregation() *MaxBucketAggregation { + return &MaxBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MaxBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+	return a
+}
+
+func (a *MaxBucketAggregation) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["max_bucket"] = params
+
+	if a.format != "" {
+		params["format"] = a.format
+	}
+	if a.gapPolicy != "" {
+		params["gap_policy"] = a.gapPolicy
+	}
+
+	// Add buckets paths
+	switch len(a.bucketsPaths) {
+	case 0:
+	case 1:
+		params["buckets_path"] = a.bucketsPaths[0]
+	default:
+		params["buckets_path"] = a.bucketsPaths
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			src, err := aggregate.Source()
+			if err != nil {
+				return nil, err
+			}
+			aggsMap[name] = src
+		}
+	}
+
+	// Add Meta data if available
+	if len(a.meta) > 0 {
+		source["meta"] = a.meta
+	}
+
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket_test.go
new file mode 100644
index 000000000..8bdde8fcd
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_max_bucket_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestMaxBucketAggregation(t *testing.T) {
+	agg := NewMaxBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+	src, err := agg.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"max_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go
new file mode 100644
index 000000000..325f00f03
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket.go
@@ -0,0 +1,114 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinBucketAggregation is a sibling pipeline aggregation which identifies
+// the bucket(s) with the minimum value of a specified metric in a sibling
+// aggregation and outputs both the value and the key(s) of the bucket(s).
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html
+type MinBucketAggregation struct {
+	format    string
+	gapPolicy string
+
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	bucketsPaths    []string
+}
+
+// NewMinBucketAggregation creates and initializes a new MinBucketAggregation.
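+//
+// A minimal sketch; the bucket path is assumed and mirrors the unit test
+// for this aggregation:
+//
+//    agg := NewMinBucketAggregation().BucketsPath("sales_per_month>sales").GapPolicy("skip")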
+func NewMinBucketAggregation() *MinBucketAggregation { + return &MinBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MinBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MinBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MinBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["min_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket_test.go new file mode 100644 index 000000000..86fc9cd7f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_min_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMinBucketAggregation(t *testing.T) { + agg := NewMinBucketAggregation().BucketsPath("sales_per_month>sales").GapPolicy("skip") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"min_bucket":{"buckets_path":"sales_per_month\u003esales","gap_policy":"skip"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go new file mode 100644 index 000000000..021144ddc --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg.go @@ -0,0 +1,393 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MovAvgAggregation operates on a series of data. It will slide a window +// across the data and emit the average value of that window. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html +type MovAvgAggregation struct { + format string + gapPolicy string + model MovAvgModel + window *int + predict *int + minimize *bool + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMovAvgAggregation creates and initializes a new MovAvgAggregation. +func NewMovAvgAggregation() *MovAvgAggregation { + return &MovAvgAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation { + a.gapPolicy = "skip" + return a +} + +// Model is used to define what type of moving average you want to use +// in the series. +func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation { + a.model = model + return a +} + +// Window sets the window size for the moving average. This window will +// "slide" across the series, and the values inside that window will +// be used to calculate the moving avg value. +func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation { + a.window = &window + return a +} + +// Predict sets the number of predictions that should be returned. +// Each prediction will be spaced at the intervals in the histogram. +// E.g. a predict of 2 will return two new buckets at the end of the +// histogram with the predicted values. 
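+//
+// For instance, a hypothetical 30-bucket window with 10 predicted buckets:
+//
+//    agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Predict(10)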
+func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation { + a.predict = &numPredictions + return a +} + +// Minimize determines if the model should be fit to the data using a +// cost minimizing algorithm. +func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation { + a.minimize = &minimize + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MovAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *MovAvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MovAvgAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["moving_avg"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.model != nil { + params["model"] = a.model.Name() + settings := a.model.Settings() + if len(settings) > 0 { + params["settings"] = settings + } + } + if a.window != nil { + params["window"] = *a.window + } + if a.predict != nil { + params["predict"] = *a.predict + } + if a.minimize != nil { + params["minimize"] = *a.minimize + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Models for moving averages -- +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_models + +// MovAvgModel specifies the model to use with the MovAvgAggregation. +type MovAvgModel interface { + Name() string + Settings() map[string]interface{} +} + +// -- EWMA -- + +// EWMAMovAvgModel calculates an exponentially weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted +type EWMAMovAvgModel struct { + alpha *float64 +} + +// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel. +func NewEWMAMovAvgModel() *EWMAMovAvgModel { + return &EWMAMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel { + m.alpha = &alpha + return m +} + +// Name of the model. +func (m *EWMAMovAvgModel) Name() string { + return "ewma" +} + +// Settings of the model. 
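+// For EWMA the map holds the "alpha" value, if one was set, so the
+// serialized aggregation includes, for example:
+//
+//    "settings":{"alpha":0.5}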
+func (m *EWMAMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + return settings +} + +// -- Holt linear -- + +// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear +type HoltLinearMovAvgModel struct { + alpha *float64 + beta *float64 +} + +// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel. +func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel { + return &HoltLinearMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel { + m.alpha = &alpha + return m +} + +// Beta is equivalent to Alpha but controls the smoothing of the trend +// instead of the data. +func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel { + m.beta = &beta + return m +} + +// Name of the model. +func (m *HoltLinearMovAvgModel) Name() string { + return "holt" +} + +// Settings of the model. +func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + if m.beta != nil { + settings["beta"] = *m.beta + } + return settings +} + +// -- Holt Winters -- + +// HoltWintersMovAvgModel calculates a triple exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters +type HoltWintersMovAvgModel struct { + alpha *float64 + beta *float64 + gamma *float64 + period *int + seasonalityType string + pad *bool +} + +// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel. +func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel { + return &HoltWintersMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel { + m.alpha = &alpha + return m +} + +// Beta is equivalent to Alpha but controls the smoothing of the trend +// instead of the data. +func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel { + m.beta = &beta + return m +} + +func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel { + m.gamma = &gamma + return m +} + +func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel { + m.period = &period + return m +} + +func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel { + m.seasonalityType = typ + return m +} + +func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel { + m.pad = &pad + return m +} + +// Name of the model. +func (m *HoltWintersMovAvgModel) Name() string { + return "holt_winters" +} + +// Settings of the model. 
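+// For Holt-Winters the map may hold "alpha", "beta", "gamma", "period",
+// "pad" and "type", depending on which options were set, e.g. (mirroring
+// the unit tests in this package):
+//
+//    "settings":{"alpha":0.5,"beta":0.4,"gamma":0.3,"pad":true,"period":7}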
+func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + if m.beta != nil { + settings["beta"] = *m.beta + } + if m.gamma != nil { + settings["gamma"] = *m.gamma + } + if m.period != nil { + settings["period"] = *m.period + } + if m.pad != nil { + settings["pad"] = *m.pad + } + if m.seasonalityType != "" { + settings["type"] = m.seasonalityType + } + return settings +} + +// -- Linear -- + +// LinearMovAvgModel calculates a linearly weighted moving average, such +// that older values are linearly less important. "Time" is determined +// by position in collection. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_linear +type LinearMovAvgModel struct { +} + +// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel. +func NewLinearMovAvgModel() *LinearMovAvgModel { + return &LinearMovAvgModel{} +} + +// Name of the model. +func (m *LinearMovAvgModel) Name() string { + return "linear" +} + +// Settings of the model. +func (m *LinearMovAvgModel) Settings() map[string]interface{} { + return nil +} + +// -- Simple -- + +// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_simple +type SimpleMovAvgModel struct { +} + +// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel. +func NewSimpleMovAvgModel() *SimpleMovAvgModel { + return &SimpleMovAvgModel{} +} + +// Name of the model. +func (m *SimpleMovAvgModel) Name() string { + return "simple" +} + +// Settings of the model. +func (m *SimpleMovAvgModel) Settings() map[string]interface{} { + return nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg_test.go new file mode 100644 index 000000000..e17c1c0a0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_mov_avg_test.go @@ -0,0 +1,132 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
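+//
+// The tests below verify only the serialized request body. A hypothetical
+// end-to-end request (an initialized client plus index, field and
+// aggregation names are assumed) could look like:
+//
+//    h := NewDateHistogramAggregation().Field("time").Interval("month").
+//        SubAggregation("the_sum", NewSumAggregation().Field("price")).
+//        SubAggregation("the_movavg", NewMovAvgAggregation().BucketsPath("the_sum"))
+//    res, err := client.Search().Index("sales").Aggregation("my_date_histo", h).Do()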
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMovAvgAggregation(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithSimpleModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewSimpleMovAvgModel()) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"simple","window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithLinearModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewLinearMovAvgModel()) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"linear","window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithEWMAModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewEWMAMovAvgModel().Alpha(0.5)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"ewma","settings":{"alpha":0.5},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithHoltLinearModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30). + Model(NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","model":"holt","settings":{"alpha":0.5,"beta":0.4},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithHoltWintersModel(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Predict(10).Minimize(true). 
+ Model(NewHoltWintersMovAvgModel().Alpha(0.5).Beta(0.4).Gamma(0.3).Period(7).Pad(true)) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"moving_avg":{"buckets_path":"the_sum","minimize":true,"model":"holt_winters","predict":10,"settings":{"alpha":0.5,"beta":0.4,"gamma":0.3,"pad":true,"period":7},"window":30}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMovAvgAggregationWithSubAggs(t *testing.T) { + agg := NewMovAvgAggregation().BucketsPath("the_sum") + agg = agg.SubAggregation("avg_sum", NewAvgAggregation().Field("height")) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"aggregations":{"avg_sum":{"avg":{"field":"height"}}},"moving_avg":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go new file mode 100644 index 000000000..db81d3cf4 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff.go @@ -0,0 +1,124 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SerialDiffAggregation implements serial differencing. +// Serial differencing is a technique where values in a time series are +// subtracted from itself at different time lags or periods. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html +type SerialDiffAggregation struct { + format string + gapPolicy string + lag *int + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation. +func NewSerialDiffAggregation() *SerialDiffAggregation { + return &SerialDiffAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation { + a.gapPolicy = "skip" + return a +} + +// Lag specifies the historical bucket to subtract from the current value. +// E.g. a lag of 7 will subtract the current value from the value 7 buckets +// ago. Lag must be a positive, non-zero integer. 
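+//
+// For example, a hypothetical week-over-week difference on a daily series,
+// mirroring the unit test for this aggregation:
+//
+//    agg := NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7)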
+func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation { + a.lag = &lag + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SerialDiffAggregation) SubAggregation(name string, subAggregation Aggregation) *SerialDiffAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *SerialDiffAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["serial_diff"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.lag != nil { + params["lag"] = *a.lag + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff_test.go new file mode 100644 index 000000000..17e512c5d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_serial_diff_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSerialDiffAggregation(t *testing.T) { + agg := NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7) + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"serial_diff":{"buckets_path":"the_sum","lag":7}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go new file mode 100644 index 000000000..16ef64986 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// SumBucketAggregation is a sibling pipeline aggregation which calculates +// the sum across all buckets of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html +type SumBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSumBucketAggregation creates and initializes a new SumBucketAggregation. +func NewSumBucketAggregation() *SumBucketAggregation { + return &SumBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SumBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *SumBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *SumBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["sum_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket_test.go new file mode 100644 index 000000000..a1c84026d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_sum_bucket_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSumBucketAggregation(t *testing.T) { + agg := NewSumBucketAggregation().BucketsPath("the_sum") + src, err := agg.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"sum_bucket":{"buckets_path":"the_sum"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_test.go new file mode 100644 index 000000000..be6bbfc87 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_pipeline_test.go @@ -0,0 +1,1000 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "testing" + +func TestAggsIntegrationAvgBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
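+		// avg_bucket needs a multi-bucket sibling aggregation: the monthly
+		// date histogram and its "sales" sum built next are what the
+		// "sales_per_month>sales" buckets path refers to.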
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("avg_monthly_sales", NewAvgBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.AvgBucket("avg_monthly_sales") + if !found { + t.Fatal("expected avg_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected avg_monthly_sales aggregation") + } + if agg.Value == nil { + t.Fatal("expected avg_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(939.2); got != want { + t.Fatalf("expected avg_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationDerivative(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales")) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].Derivative("sales_deriv") + if found { + t.Fatal("expected no sales_deriv aggregation") + } + if d != nil { + t.Fatal("expected no sales_deriv aggregation") + } + + d, found = agg.Buckets[1].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value != 
nil { + t.Fatal("expected sales_deriv value == nil") + } + + d, found = agg.Buckets[2].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value != nil { + t.Fatal("expected sales_deriv value == nil") + } + + d, found = agg.Buckets[3].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(2348.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(-1658.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].Derivative("sales_deriv") + if !found { + t.Fatal("expected sales_deriv aggregation") + } + if d == nil { + t.Fatal("expected sales_deriv aggregation") + } + if d.Value == nil { + t.Fatal("expected sales_deriv value != nil") + } + if got, want := *d.Value, float64(-722.0); got != want { + t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMaxBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("max_monthly_sales", NewMaxBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.MaxBucket("max_monthly_sales") + if !found { + t.Fatal("expected max_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected max_monthly_sales aggregation") + } + if got, want := len(agg.Keys), 1; got != want { + t.Fatalf("expected len(max_monthly_sales.keys)=%d; got: %d", want, got) + } + if got, want := agg.Keys[0], "2015-04-01"; got != want { + t.Fatalf("expected max_monthly_sales.keys[0]=%v; got: %v", want, got) + } + if agg.Value == nil { + t.Fatal("expected max_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(2448); got != want { + t.Fatalf("expected max_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMinBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("min_monthly_sales", NewMinBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.MinBucket("min_monthly_sales") + if !found { + t.Fatal("expected min_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected min_monthly_sales aggregation") + } + if got, want := len(agg.Keys), 1; got != want { + t.Fatalf("expected len(min_monthly_sales.keys)=%d; got: %d", want, got) + } + if got, want := agg.Keys[0], "2015-06-01"; got != want { + t.Fatalf("expected min_monthly_sales.keys[0]=%v; got: %v", want, got) + } + if agg.Value == nil { + t.Fatal("expected min_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(68); got != want { + t.Fatalf("expected min_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationSumBucket(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + builder = builder.Aggregation("sales_per_month", h) + builder = builder.Aggregation("sum_monthly_sales", NewSumBucketAggregation().BucketsPath("sales_per_month>sales")) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.SumBucket("sum_monthly_sales") + if !found { + t.Fatal("expected sum_monthly_sales aggregation") + } + if agg == nil { + t.Fatal("expected sum_monthly_sales aggregation") + } + if agg.Value == nil { + t.Fatal("expected sum_monthly_sales.value != nil") + } + if got, want := *agg.Value, float64(4696.0); got != want { + t.Fatalf("expected sum_monthly_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationMovAvg(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). 
+		Pretty(true)
+	h := NewDateHistogramAggregation().Field("time").Interval("month")
+	h = h.SubAggregation("the_sum", NewSumAggregation().Field("price"))
+	h = h.SubAggregation("the_movavg", NewMovAvgAggregation().BucketsPath("the_sum"))
+	builder = builder.Aggregation("my_date_histo", h)
+
+	res, err := builder.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Hits == nil {
+		t.Errorf("expected Hits != nil; got: nil")
+	}
+
+	aggs := res.Aggregations
+	if aggs == nil {
+		t.Fatal("expected aggregations != nil; got: nil")
+	}
+
+	agg, found := aggs.DateHistogram("my_date_histo")
+	if !found {
+		t.Fatal("expected my_date_histo aggregation")
+	}
+	if agg == nil {
+		t.Fatal("expected my_date_histo aggregation")
+	}
+	if got, want := len(agg.Buckets), 6; got != want {
+		t.Fatalf("expected %d buckets; got: %d", want, got)
+	}
+
+	d, found := agg.Buckets[0].MovAvg("the_movavg")
+	if found {
+		t.Fatal("expected no the_movavg aggregation")
+	}
+	if d != nil {
+		t.Fatal("expected no the_movavg aggregation")
+	}
+
+	d, found = agg.Buckets[1].MovAvg("the_movavg")
+	if found {
+		t.Fatal("expected no the_movavg aggregation")
+	}
+	if d != nil {
+		t.Fatal("expected no the_movavg aggregation")
+	}
+
+	d, found = agg.Buckets[2].MovAvg("the_movavg")
+	if !found {
+		t.Fatal("expected the_movavg aggregation")
+	}
+	if d == nil {
+		t.Fatal("expected the_movavg aggregation")
+	}
+	if d.Value == nil {
+		t.Fatal("expected the_movavg value")
+	}
+	if got, want := *d.Value, float64(1290.0); got != want {
+		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
+	}
+
+	d, found = agg.Buckets[3].MovAvg("the_movavg")
+	if !found {
+		t.Fatal("expected the_movavg aggregation")
+	}
+	if d == nil {
+		t.Fatal("expected the_movavg aggregation")
+	}
+	if d.Value == nil {
+		t.Fatal("expected the_movavg value")
+	}
+	if got, want := *d.Value, float64(695.0); got != want {
+		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
+	}
+
+	d, found = agg.Buckets[4].MovAvg("the_movavg")
+	if !found {
+		t.Fatal("expected the_movavg aggregation")
+	}
+	if d == nil {
+		t.Fatal("expected the_movavg aggregation")
+	}
+	if d.Value == nil {
+		t.Fatal("expected the_movavg value")
+	}
+	if got, want := *d.Value, float64(1279.3333333333333); got != want {
+		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
+	}
+
+	d, found = agg.Buckets[5].MovAvg("the_movavg")
+	if !found {
+		t.Fatal("expected the_movavg aggregation")
+	}
+	if d == nil {
+		t.Fatal("expected the_movavg aggregation")
+	}
+	if d.Value == nil {
+		t.Fatal("expected the_movavg value")
+	}
+	if got, want := *d.Value, float64(1157.0); got != want {
+		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
+	}
+}
+
+func TestAggsIntegrationCumulativeSum(t *testing.T) {
+	//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+	client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+	esversion, err := client.ElasticsearchVersion(DefaultURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if esversion < "2.0" {
+		t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
+		return
+	}
+
+	// Match all should return all documents
+	builder := client.Search().
+		Index(testIndexName).
+		Type("order").
+		Query(NewMatchAllQuery()).
+ Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("cumulative_sales", NewCumulativeSumAggregation().BucketsPath("sales")) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1290.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[1].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1290.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[2].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(1390.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[3].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(3838.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if 
got, want := *d.Value, float64(4628.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].CumulativeSum("cumulative_sales") + if !found { + t.Fatal("expected cumulative_sales aggregation") + } + if d == nil { + t.Fatal("expected cumulative_sales aggregation") + } + if d.Value == nil { + t.Fatal("expected cumulative_sales value != nil") + } + if got, want := *d.Value, float64(4696.0); got != want { + t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationBucketScript(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("total_sales", NewSumAggregation().Field("price")) + appleFilter := NewFilterAggregation().Filter(NewTermQuery("manufacturer", "Apple")) + appleFilter = appleFilter.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("apple_sales", appleFilter) + h = h.SubAggregation("apple_percentage", + NewBucketScriptAggregation(). + GapPolicy("insert_zeros"). + AddBucketsPath("appleSales", "apple_sales>sales"). + AddBucketsPath("totalSales", "total_sales"). + Script(NewScript("appleSales / totalSales * 100"))) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatalf("%v (maybe scripting is disabled?)", err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(100.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; 
got: %v", want, got) + } + + d, found = agg.Buckets[1].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value != nil { + t.Fatal("expected apple_percentage value == nil") + } + + d, found = agg.Buckets[2].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[3].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(34.64052287581699); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].BucketScript("apple_percentage") + if !found { + t.Fatal("expected apple_percentage aggregation") + } + if d == nil { + t.Fatal("expected apple_percentage aggregation") + } + if d.Value == nil { + t.Fatal("expected apple_percentage value != nil") + } + if got, want := *d.Value, float64(0.0); got != want { + t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) + } +} + +func TestAggsIntegrationBucketSelector(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("total_sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("sales_bucket_filter", + NewBucketSelectorAggregation(). + AddBucketsPath("totalSales", "total_sales"). 
+ Script(NewScript("totalSales <= 100"))) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatalf("%v (maybe scripting is disabled?)", err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 2; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } +} + +func TestAggsIntegrationSerialDiff(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + if esversion < "2.0" { + t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) + return + } + + // Match all should return all documents + builder := client.Search(). + Index(testIndexName). + Type("order"). + Query(NewMatchAllQuery()). + Pretty(true) + h := NewDateHistogramAggregation().Field("time").Interval("month") + h = h.SubAggregation("sales", NewSumAggregation().Field("price")) + h = h.SubAggregation("the_diff", NewSerialDiffAggregation().BucketsPath("sales").Lag(1)) + builder = builder.Aggregation("sales_per_month", h) + + res, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + + aggs := res.Aggregations + if aggs == nil { + t.Fatal("expected aggregations != nil; got: nil") + } + + agg, found := aggs.DateHistogram("sales_per_month") + if !found { + t.Fatal("expected sales_per_month aggregation") + } + if agg == nil { + t.Fatal("expected sales_per_month aggregation") + } + if got, want := len(agg.Buckets), 6; got != want { + t.Fatalf("expected %d buckets; got: %d", want, got) + } + + if got, want := agg.Buckets[0].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[1].DocCount, int64(0); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[2].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[3].DocCount, int64(3); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[4].DocCount, int64(1); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + if got, want := agg.Buckets[5].DocCount, int64(2); got != want { + t.Fatalf("expected DocCount=%d; got: %d", want, got) + } + + d, found := agg.Buckets[0].SerialDiff("the_diff") + if found { + t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = agg.Buckets[1].SerialDiff("the_diff") + if found { + t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = agg.Buckets[2].SerialDiff("the_diff") + if found { 
+ t.Fatal("expected no the_diff aggregation") + } + if d != nil { + t.Fatal("expected no the_diff aggregation") + } + + d, found = agg.Buckets[3].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(2348.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[4].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(-1658.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } + + d, found = agg.Buckets[5].SerialDiff("the_diff") + if !found { + t.Fatal("expected the_diff aggregation") + } + if d == nil { + t.Fatal("expected the_diff aggregation") + } + if d.Value == nil { + t.Fatal("expected the_diff value != nil") + } + if got, want := *d.Value, float64(-722.0); got != want { + t.Fatalf("expected the_diff.value=%v; got: %v", want, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_test.go new file mode 100644 index 000000000..ef6ec2112 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_aggs_test.go @@ -0,0 +1,2996 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "strings" + "testing" + "time" +) + +func TestAggs(t *testing.T) { + //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndex(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + + tweet1 := tweet{ + User: "olivere", + Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Image: "http://golang.org/doc/gopher/gophercolor.png", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", + Retweets: 0, + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + Location: "48.1189,11.4289", // lat,lon + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", + Retweets: 12, + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + Location: "47.7167,11.7167", // lat,lon + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + // Terms Aggregate by user name + globalAgg := NewGlobalAggregation() + usersAgg := 
NewTermsAggregation().Field("user").Size(10).OrderByCountDesc() + retweetsAgg := NewTermsAggregation().Field("retweets").Size(10) + avgRetweetsAgg := NewAvgAggregation().Field("retweets") + avgRetweetsWithMetaAgg := NewAvgAggregation().Field("retweetsMeta").Meta(map[string]interface{}{"meta": true}) + minRetweetsAgg := NewMinAggregation().Field("retweets") + maxRetweetsAgg := NewMaxAggregation().Field("retweets") + sumRetweetsAgg := NewSumAggregation().Field("retweets") + statsRetweetsAgg := NewStatsAggregation().Field("retweets") + extstatsRetweetsAgg := NewExtendedStatsAggregation().Field("retweets") + valueCountRetweetsAgg := NewValueCountAggregation().Field("retweets") + percentilesRetweetsAgg := NewPercentilesAggregation().Field("retweets") + percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75) + cardinalityAgg := NewCardinalityAggregation().Field("user") + significantTermsAgg := NewSignificantTermsAggregation().Field("message") + samplerAgg := NewSamplerAggregation().Field("user").SubAggregation("tagged_with", NewTermsAggregation().Field("tags")) + retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100) + retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100) + dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01") + missingTagsAgg := NewMissingAggregation().Field("tags") + retweetsHistoAgg := NewHistogramAggregation().Field("retweets").Interval(100) + dateHistoAgg := NewDateHistogramAggregation().Field("created").Interval("year") + retweetsFilterAgg := NewFilterAggregation().Filter( + NewRangeQuery("created").Gte("2012-01-01").Lte("2012-12-31")). 
+ SubAggregation("avgRetweetsSub", NewAvgAggregation().Field("retweets")) + queryFilterAgg := NewFilterAggregation().Filter(NewTermQuery("tags", "golang")) + topTagsHitsAgg := NewTopHitsAggregation().Sort("created", false).Size(5).FetchSource(true) + topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg) + geoBoundsAgg := NewGeoBoundsAggregation().Field("location") + + // Run query + builder := client.Search().Index(testIndexName).Query(all).Pretty(true) + builder = builder.Aggregation("global", globalAgg) + builder = builder.Aggregation("users", usersAgg) + builder = builder.Aggregation("retweets", retweetsAgg) + builder = builder.Aggregation("avgRetweets", avgRetweetsAgg) + if esversion >= "2.0" { + builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg) + } + builder = builder.Aggregation("minRetweets", minRetweetsAgg) + builder = builder.Aggregation("maxRetweets", maxRetweetsAgg) + builder = builder.Aggregation("sumRetweets", sumRetweetsAgg) + builder = builder.Aggregation("statsRetweets", statsRetweetsAgg) + builder = builder.Aggregation("extstatsRetweets", extstatsRetweetsAgg) + builder = builder.Aggregation("valueCountRetweets", valueCountRetweetsAgg) + builder = builder.Aggregation("percentilesRetweets", percentilesRetweetsAgg) + builder = builder.Aggregation("percentileRanksRetweets", percentileRanksRetweetsAgg) + builder = builder.Aggregation("usersCardinality", cardinalityAgg) + builder = builder.Aggregation("significantTerms", significantTermsAgg) + builder = builder.Aggregation("sample", samplerAgg) + builder = builder.Aggregation("retweetsRange", retweetsRangeAgg) + builder = builder.Aggregation("retweetsKeyedRange", retweetsKeyedRangeAgg) + builder = builder.Aggregation("dateRange", dateRangeAgg) + builder = builder.Aggregation("missingTags", missingTagsAgg) + builder = builder.Aggregation("retweetsHisto", retweetsHistoAgg) + builder = builder.Aggregation("dateHisto", dateHistoAgg) + builder = builder.Aggregation("retweetsFilter", retweetsFilterAgg) + builder = builder.Aggregation("queryFilter", queryFilterAgg) + builder = builder.Aggregation("top-tags", topTagsAgg) + builder = builder.Aggregation("viewport", geoBoundsAgg) + if esversion >= "1.4" { + countByUserAgg := NewFiltersAggregation().Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae")) + builder = builder.Aggregation("countByUser", countByUserAgg) + } + if esversion >= "2.0" { + // AvgBucket + dateHisto := NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("avgBucketDateHisto", dateHisto) + builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets")) + // MinBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("minBucketDateHisto", dateHisto) + builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets")) + // MaxBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("maxBucketDateHisto", dateHisto) + builder = 
builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets")) + // SumBucket + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + builder = builder.Aggregation("sumBucketDateHisto", dateHisto) + builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets")) + // MovAvg + dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") + dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) + dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) + builder = builder.Aggregation("movingAvgDateHisto", dateHisto) + } + searchResult, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected Hits != nil; got: nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected Hits.TotalHits = %d; got: %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(Hits.Hits) = %d; got: %d", 3, len(searchResult.Hits.Hits)) + } + agg := searchResult.Aggregations + if agg == nil { + t.Fatalf("expected Aggregations != nil; got: nil") + } + + // Search for non-existent aggregate should return (nil, false) + unknownAgg, found := agg.Terms("no-such-aggregate") + if found { + t.Errorf("expected unknown aggregation to not be found; got: %v", found) + } + if unknownAgg != nil { + t.Errorf("expected unknown aggregation to return %v; got %v", nil, unknownAgg) + } + + // Global + globalAggRes, found := agg.Global("global") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if globalAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if globalAggRes.DocCount != 3 { + t.Errorf("expected DocCount = %d; got: %d", 3, globalAggRes.DocCount) + } + + // Search for existent aggregate (by name) should return (aggregate, true) + termsAggRes, found := agg.Terms("users") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if termsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(termsAggRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(termsAggRes.Buckets)) + } + if termsAggRes.Buckets[0].Key != "olivere" { + t.Errorf("expected %q; got: %q", "olivere", termsAggRes.Buckets[0].Key) + } + if termsAggRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, termsAggRes.Buckets[0].DocCount) + } + if termsAggRes.Buckets[1].Key != "sandrae" { + t.Errorf("expected %q; got: %q", "sandrae", termsAggRes.Buckets[1].Key) + } + if termsAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, termsAggRes.Buckets[1].DocCount) + } + + // A terms aggregate with keys that are not strings + retweetsAggRes, found := agg.Terms("retweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if retweetsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(retweetsAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(retweetsAggRes.Buckets)) + } + + if retweetsAggRes.Buckets[0].Key != float64(0) { + t.Errorf("expected %v; got: %v", float64(0), retweetsAggRes.Buckets[0].Key) + } + if got, err := retweetsAggRes.Buckets[0].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[0].Key) + } else if got != 0 { + t.Errorf("expected %d; got: 
%d", 0, got) + } + if retweetsAggRes.Buckets[0].KeyNumber != "0" { + t.Errorf("expected %q; got: %q", "0", retweetsAggRes.Buckets[0].KeyNumber) + } + if retweetsAggRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[0].DocCount) + } + + if retweetsAggRes.Buckets[1].Key != float64(12) { + t.Errorf("expected %v; got: %v", float64(12), retweetsAggRes.Buckets[1].Key) + } + if got, err := retweetsAggRes.Buckets[1].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[1].KeyNumber) + } else if got != 12 { + t.Errorf("expected %d; got: %d", 12, got) + } + if retweetsAggRes.Buckets[1].KeyNumber != "12" { + t.Errorf("expected %q; got: %q", "12", retweetsAggRes.Buckets[1].KeyNumber) + } + if retweetsAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[1].DocCount) + } + + if retweetsAggRes.Buckets[2].Key != float64(108) { + t.Errorf("expected %v; got: %v", float64(108), retweetsAggRes.Buckets[2].Key) + } + if got, err := retweetsAggRes.Buckets[2].KeyNumber.Int64(); err != nil { + t.Errorf("expected %d; got: %v", 108, retweetsAggRes.Buckets[2].KeyNumber) + } else if got != 108 { + t.Errorf("expected %d; got: %d", 108, got) + } + if retweetsAggRes.Buckets[2].KeyNumber != "108" { + t.Errorf("expected %q; got: %q", "108", retweetsAggRes.Buckets[2].KeyNumber) + } + if retweetsAggRes.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[2].DocCount) + } + + // avgRetweets + avgAggRes, found := agg.Avg("avgRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if avgAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if avgAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *avgAggRes.Value) + } + if *avgAggRes.Value != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *avgAggRes.Value) + } + + // avgRetweetsWithMeta + if esversion >= "2.0" { + avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if avgMetaAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if avgMetaAggRes.Meta == nil { + t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta) + } + metaDataValue, found := avgMetaAggRes.Meta["meta"] + if !found { + t.Fatalf("expected to return meta data key %q; got: %v", "meta", found) + } + if flag, ok := metaDataValue.(bool); !ok { + t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue) + } else if flag != true { + t.Fatalf("expected to return meta data key value %v; got: %v", true, flag) + } + } + + // minRetweets + minAggRes, found := agg.Min("minRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if minAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if minAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *minAggRes.Value) + } + if *minAggRes.Value != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *minAggRes.Value) + } + + // maxRetweets + maxAggRes, found := agg.Max("maxRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if maxAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if maxAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *maxAggRes.Value) + } + if *maxAggRes.Value != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *maxAggRes.Value) + } + + // sumRetweets + sumAggRes, found := agg.Sum("sumRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if sumAggRes == nil { + 
t.Fatalf("expected != nil; got: nil") + } + if sumAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *sumAggRes.Value) + } + if *sumAggRes.Value != 120.0 { + t.Errorf("expected %v; got: %v", 120.0, *sumAggRes.Value) + } + + // statsRetweets + statsAggRes, found := agg.Stats("statsRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if statsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if statsAggRes.Count != 3 { + t.Errorf("expected %d; got: %d", 3, statsAggRes.Count) + } + if statsAggRes.Min == nil { + t.Fatalf("expected != nil; got: %v", *statsAggRes.Min) + } + if *statsAggRes.Min != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *statsAggRes.Min) + } + if statsAggRes.Max == nil { + t.Fatalf("expected != nil; got: %v", *statsAggRes.Max) + } + if *statsAggRes.Max != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *statsAggRes.Max) + } + if statsAggRes.Avg == nil { + t.Fatalf("expected != nil; got: %v", *statsAggRes.Avg) + } + if *statsAggRes.Avg != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *statsAggRes.Avg) + } + if statsAggRes.Sum == nil { + t.Fatalf("expected != nil; got: %v", *statsAggRes.Sum) + } + if *statsAggRes.Sum != 120.0 { + t.Errorf("expected %v; got: %v", 120.0, *statsAggRes.Sum) + } + + // extstatsRetweets + extStatsAggRes, found := agg.ExtendedStats("extstatsRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if extStatsAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if extStatsAggRes.Count != 3 { + t.Errorf("expected %d; got: %d", 3, extStatsAggRes.Count) + } + if extStatsAggRes.Min == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Min) + } + if *extStatsAggRes.Min != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, *extStatsAggRes.Min) + } + if extStatsAggRes.Max == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Max) + } + if *extStatsAggRes.Max != 108.0 { + t.Errorf("expected %v; got: %v", 108.0, *extStatsAggRes.Max) + } + if extStatsAggRes.Avg == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Avg) + } + if *extStatsAggRes.Avg != 40.0 { + t.Errorf("expected %v; got: %v", 40.0, *extStatsAggRes.Avg) + } + if extStatsAggRes.Sum == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Sum) + } + if *extStatsAggRes.Sum != 120.0 { + t.Errorf("expected %v; got: %v", 120.0, *extStatsAggRes.Sum) + } + if extStatsAggRes.SumOfSquares == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.SumOfSquares) + } + if *extStatsAggRes.SumOfSquares != 11808.0 { + t.Errorf("expected %v; got: %v", 11808.0, *extStatsAggRes.SumOfSquares) + } + if extStatsAggRes.Variance == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Variance) + } + if *extStatsAggRes.Variance != 2336.0 { + t.Errorf("expected %v; got: %v", 2336.0, *extStatsAggRes.Variance) + } + if extStatsAggRes.StdDeviation == nil { + t.Fatalf("expected != nil; got: %v", *extStatsAggRes.StdDeviation) + } + if *extStatsAggRes.StdDeviation != 48.33218389437829 { + t.Errorf("expected %v; got: %v", 48.33218389437829, *extStatsAggRes.StdDeviation) + } + + // valueCountRetweets + valueCountAggRes, found := agg.ValueCount("valueCountRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if valueCountAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if valueCountAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *valueCountAggRes.Value) + } + if *valueCountAggRes.Value != 3.0 { + t.Errorf("expected %v; got: %v", 3.0, 
*valueCountAggRes.Value) + } + + // percentilesRetweets + percentilesAggRes, found := agg.Percentiles("percentilesRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if percentilesAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + // ES 1.4.x returns 7: {"1.0":...} + // ES 1.5.0 returns 14: {"1.0":..., "1.0_as_string":...} + // So we're relaxing the test here. + if len(percentilesAggRes.Values) == 0 { + t.Errorf("expected at least %d value; got: %d\nValues are: %#v", 1, len(percentilesAggRes.Values), percentilesAggRes.Values) + } + if _, found := percentilesAggRes.Values["0.0"]; found { + t.Errorf("expected %v; got: %v", false, found) + } + if percentilesAggRes.Values["1.0"] != 0.24 { + t.Errorf("expected %v; got: %v", 0.24, percentilesAggRes.Values["1.0"]) + } + if percentilesAggRes.Values["25.0"] != 6.0 { + t.Errorf("expected %v; got: %v", 6.0, percentilesAggRes.Values["25.0"]) + } + if percentilesAggRes.Values["99.0"] != 106.08 { + t.Errorf("expected %v; got: %v", 106.08, percentilesAggRes.Values["99.0"]) + } + + // percentileRanksRetweets + percentileRanksAggRes, found := agg.PercentileRanks("percentileRanksRetweets") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if percentileRanksAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(percentileRanksAggRes.Values) == 0 { + t.Errorf("expected at least %d value; got %d\nValues are: %#v", 1, len(percentileRanksAggRes.Values), percentileRanksAggRes.Values) + } + if _, found := percentileRanksAggRes.Values["0.0"]; found { + t.Errorf("expected %v; got: %v", true, found) + } + if percentileRanksAggRes.Values["25.0"] != 21.180555555555557 { + t.Errorf("expected %v; got: %v", 21.180555555555557, percentileRanksAggRes.Values["25.0"]) + } + if percentileRanksAggRes.Values["50.0"] != 29.86111111111111 { + t.Errorf("expected %v; got: %v", 29.86111111111111, percentileRanksAggRes.Values["50.0"]) + } + if percentileRanksAggRes.Values["75.0"] != 38.54166666666667 { + t.Errorf("expected %v; got: %v", 38.54166666666667, percentileRanksAggRes.Values["75.0"]) + } + + // usersCardinality + cardAggRes, found := agg.Cardinality("usersCardinality") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if cardAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if cardAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", *cardAggRes.Value) + } + if *cardAggRes.Value != 2 { + t.Errorf("expected %v; got: %v", 2, *cardAggRes.Value) + } + + // retweetsFilter + filterAggRes, found := agg.Filter("retweetsFilter") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if filterAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if filterAggRes.DocCount != 2 { + t.Fatalf("expected %v; got: %v", 2, filterAggRes.DocCount) + } + + // Retrieve sub-aggregation + avgRetweetsAggRes, found := filterAggRes.Avg("avgRetweetsSub") + if !found { + t.Error("expected sub-aggregation \"avgRetweets\" to be found; got false") + } + if avgRetweetsAggRes == nil { + t.Fatal("expected sub-aggregation \"avgRetweets\"; got nil") + } + if avgRetweetsAggRes.Value == nil { + t.Fatalf("expected != nil; got: %v", avgRetweetsAggRes.Value) + } + if *avgRetweetsAggRes.Value != 54.0 { + t.Errorf("expected %v; got: %v", 54.0, *avgRetweetsAggRes.Value) + } + + // queryFilter + queryFilterAggRes, found := agg.Filter("queryFilter") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if queryFilterAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if 
queryFilterAggRes.DocCount != 2 { + t.Fatalf("expected %v; got: %v", 2, queryFilterAggRes.DocCount) + } + + // significantTerms + stAggRes, found := agg.SignificantTerms("significantTerms") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if stAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if stAggRes.DocCount != 3 { + t.Errorf("expected %v; got: %v", 3, stAggRes.DocCount) + } + if len(stAggRes.Buckets) != 0 { + t.Errorf("expected %v; got: %v", 0, len(stAggRes.Buckets)) + } + + // sampler + samplerAggRes, found := agg.Sampler("sample") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if samplerAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if samplerAggRes.DocCount != 2 { + t.Errorf("expected %v; got: %v", 2, samplerAggRes.DocCount) + } + sub, found := samplerAggRes.Aggregations["tagged_with"] + if !found { + t.Fatalf("expected sub aggregation %q", "tagged_with") + } + if sub == nil { + t.Fatalf("expected sub aggregation %q; got: %v", "tagged_with", sub) + } + + // retweetsRange + rangeAggRes, found := agg.Range("retweetsRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if rangeAggRes == nil { + t.Fatal("expected != nil; got: nil") + } + if len(rangeAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(rangeAggRes.Buckets)) + } + if rangeAggRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[0].DocCount) + } + if rangeAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[1].DocCount) + } + if rangeAggRes.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[2].DocCount) + } + + // retweetsKeyedRange + keyedRangeAggRes, found := agg.KeyedRange("retweetsKeyedRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if keyedRangeAggRes == nil { + t.Fatal("expected != nil; got: nil") + } + if len(keyedRangeAggRes.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(keyedRangeAggRes.Buckets)) + } + _, found = keyedRangeAggRes.Buckets["no-such-key"] + if found { + t.Fatalf("expected bucket to not be found; got: %v", found) + } + bucket, found := keyedRangeAggRes.Buckets["*-10.0"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + bucket, found = keyedRangeAggRes.Buckets["10.0-100.0"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + bucket, found = keyedRangeAggRes.Buckets["100.0-*"] + if !found { + t.Fatalf("expected bucket to be found; got: %v", found) + } + if bucket.DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, bucket.DocCount) + } + + // dateRange + dateRangeRes, found := agg.DateRange("dateRange") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if dateRangeRes == nil { + t.Fatal("expected != nil; got: nil") + } + if dateRangeRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, dateRangeRes.Buckets[0].DocCount) + } + if dateRangeRes.Buckets[0].From != nil { + t.Fatal("expected From to be nil") + } + if dateRangeRes.Buckets[0].To == nil { + t.Fatal("expected To to be != nil") + } + if *dateRangeRes.Buckets[0].To != 1.325376e+12 { + t.Errorf("expected %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[0].To) + } + if dateRangeRes.Buckets[0].ToAsString != "2012-01-01T00:00:00.000Z" { + 
t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[0].ToAsString) + } + if dateRangeRes.Buckets[1].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, dateRangeRes.Buckets[1].DocCount) + } + if dateRangeRes.Buckets[1].From == nil { + t.Fatal("expected From to be != nil") + } + if *dateRangeRes.Buckets[1].From != 1.325376e+12 { + t.Errorf("expected From = %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[1].From) + } + if dateRangeRes.Buckets[1].FromAsString != "2012-01-01T00:00:00.000Z" { + t.Errorf("expected FromAsString = %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].FromAsString) + } + if dateRangeRes.Buckets[1].To == nil { + t.Fatal("expected To to be != nil") + } + if *dateRangeRes.Buckets[1].To != 1.3569984e+12 { + t.Errorf("expected To = %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[1].To) + } + if dateRangeRes.Buckets[1].ToAsString != "2013-01-01T00:00:00.000Z" { + t.Errorf("expected ToAsString = %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].ToAsString) + } + if dateRangeRes.Buckets[2].DocCount != 0 { + t.Errorf("expected %d; got: %d", 0, dateRangeRes.Buckets[2].DocCount) + } + if dateRangeRes.Buckets[2].To != nil { + t.Fatal("expected To to be nil") + } + if dateRangeRes.Buckets[2].From == nil { + t.Fatal("expected From to be != nil") + } + if *dateRangeRes.Buckets[2].From != 1.3569984e+12 { + t.Errorf("expected %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[2].From) + } + if dateRangeRes.Buckets[2].FromAsString != "2013-01-01T00:00:00.000Z" { + t.Errorf("expected %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[2].FromAsString) + } + + // missingTags + missingRes, found := agg.Missing("missingTags") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if missingRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if missingRes.DocCount != 0 { + t.Errorf("expected searchResult.Aggregations[\"missingTags\"].DocCount = %v; got %v", 0, missingRes.DocCount) + } + + // retweetsHisto + histoRes, found := agg.Histogram("retweetsHisto") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if histoRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(histoRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(histoRes.Buckets)) + } + if histoRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, histoRes.Buckets[0].DocCount) + } + if histoRes.Buckets[0].Key != 0.0 { + t.Errorf("expected %v; got: %v", 0.0, histoRes.Buckets[0].Key) + } + if histoRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount) + } + if histoRes.Buckets[1].Key != 100.0 { + t.Errorf("expected %v; got: %v", 100.0, histoRes.Buckets[1].Key) + } + + // dateHisto + dateHistoRes, found := agg.DateHistogram("dateHisto") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if dateHistoRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(dateHistoRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(dateHistoRes.Buckets)) + } + if dateHistoRes.Buckets[0].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, dateHistoRes.Buckets[0].DocCount) + } + if dateHistoRes.Buckets[0].Key != 1.29384e+12 { + t.Errorf("expected %v; got: %v", 1.29384e+12, dateHistoRes.Buckets[0].Key) + } + if dateHistoRes.Buckets[0].KeyAsString == nil { + t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[0].KeyAsString) + } + if *dateHistoRes.Buckets[0].KeyAsString != "2011-01-01T00:00:00.000Z" { + 
t.Errorf("expected %q; got: %q", "2011-01-01T00:00:00.000Z", *dateHistoRes.Buckets[0].KeyAsString) + } + if dateHistoRes.Buckets[1].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, dateHistoRes.Buckets[1].DocCount) + } + if dateHistoRes.Buckets[1].Key != 1.325376e+12 { + t.Errorf("expected %v; got: %v", 1.325376e+12, dateHistoRes.Buckets[1].Key) + } + if dateHistoRes.Buckets[1].KeyAsString == nil { + t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[1].KeyAsString) + } + if *dateHistoRes.Buckets[1].KeyAsString != "2012-01-01T00:00:00.000Z" { + t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", *dateHistoRes.Buckets[1].KeyAsString) + } + + // topHits + topTags, found := agg.Terms("top-tags") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topTags == nil { + t.Fatalf("expected != nil; got: nil") + } + if esversion >= "1.4.0" { + if topTags.DocCountErrorUpperBound != 0 { + t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound) + } + if topTags.SumOfOtherDocCount != 1 { + t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount) + } + } + if len(topTags.Buckets) != 3 { + t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets)) + } + if topTags.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, topTags.Buckets[0].DocCount) + } + if topTags.Buckets[0].Key != "golang" { + t.Errorf("expected %v; got: %v", "golang", topTags.Buckets[0].Key) + } + topHits, found := topTags.Buckets[0].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatalf("expected != nil; got: nil") + } + if topHits.Hits.TotalHits != 2 { + t.Errorf("expected %d; got: %d", 2, topHits.Hits.TotalHits) + } + if topHits.Hits.Hits == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(topHits.Hits.Hits) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(topHits.Hits.Hits)) + } + hit := topHits.Hits.Hits[0] + if !found { + t.Fatalf("expected %v; got: %v", true, found) + } + if hit == nil { + t.Fatal("expected != nil; got: nil") + } + var tw tweet + if err := json.Unmarshal(*hit.Source, &tw); err != nil { + t.Fatalf("expected no error; got: %v", err) + } + if tw.Message != "Welcome to Golang and Elasticsearch." 
{ + t.Errorf("expected %q; got: %q", "Welcome to Golang and Elasticsearch.", tw.Message) + } + if topTags.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, topTags.Buckets[1].DocCount) + } + if topTags.Buckets[1].Key != "cycling" { + t.Errorf("expected %v; got: %v", "cycling", topTags.Buckets[1].Key) + } + topHits, found = topTags.Buckets[1].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatal("expected != nil; got nil") + } + if topHits.Hits.TotalHits != 1 { + t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits) + } + if topTags.Buckets[2].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, topTags.Buckets[2].DocCount) + } + if topTags.Buckets[2].Key != "elasticsearch" { + t.Errorf("expected %v; got: %v", "elasticsearch", topTags.Buckets[2].Key) + } + topHits, found = topTags.Buckets[2].TopHits("top_tag_hits") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if topHits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits == nil { + t.Fatal("expected != nil; got: nil") + } + if topHits.Hits.TotalHits != 1 { + t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits) + } + + // viewport via geo_bounds (1.3.0 has an error in that it doesn't output the aggregation name) + geoBoundsRes, found := agg.GeoBounds("viewport") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if geoBoundsRes == nil { + t.Fatalf("expected != nil; got: nil") + } + + if esversion >= "1.4" { + // Filters agg "countByUser" + countByUserAggRes, found := agg.Filters("countByUser") + if !found { + t.Errorf("expected %v; got: %v", true, found) + } + if countByUserAggRes == nil { + t.Fatalf("expected != nil; got: nil") + } + if len(countByUserAggRes.Buckets) != 2 { + t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets)) + } + if countByUserAggRes.Buckets[0].DocCount != 2 { + t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount) + } + if countByUserAggRes.Buckets[1].DocCount != 1 { + t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount) + } + } +} + +// TestAggsMarshal ensures that marshaling aggregations back into a string +// does not yield base64 encoded data. See https://github.com/olivere/elastic/issues/51 +// and https://groups.google.com/forum/#!topic/Golang-Nuts/38ShOlhxAYY for details. 
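+//
+// For orientation, a minimal sketch of how such a result is produced; the
+// calls mirror the test body below and introduce no new API:
+//
+//	res, _ := client.Search().Index(testIndexName).
+//		Query(NewMatchAllQuery()).
+//		Aggregation("dhagg", NewDateHistogramAggregation().Field("created").Interval("year")).
+//		Do()
+//	buf, _ := json.Marshal(res)
+//
+// The marshaled buf should contain plain JSON buckets rather than base64 data.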
+func TestAggsMarshal(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Image: "http://golang.org/doc/gopher/gophercolor.png", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + dhagg := NewDateHistogramAggregation().Field("created").Interval("year") + + // Run query + builder := client.Search().Index(testIndexName).Query(all) + builder = builder.Aggregation("dhagg", dhagg) + searchResult, err := builder.Do() + if err != nil { + t.Fatal(err) + } + if searchResult.TotalHits() != 1 { + t.Errorf("expected Hits.TotalHits = %d; got: %d", 1, searchResult.TotalHits()) + } + if _, found := searchResult.Aggregations["dhagg"]; !found { + t.Fatalf("expected aggregation %q", "dhagg") + } + buf, err := json.Marshal(searchResult) + if err != nil { + t.Fatal(err) + } + s := string(buf) + if i := strings.Index(s, `{"dhagg":{"buckets":[{"key_as_string":"2012-01-01`); i < 0 { + t.Errorf("expected to serialize aggregation into string; got: %v", s) + } +} + +func TestAggsMetricsMin(t *testing.T) { + s := `{ + "min_price": { + "value": 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Min("min_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(10) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value) + } +} + +func TestAggsMetricsMax(t *testing.T) { + s := `{ + "max_price": { + "value": 35 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Max("max_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(35) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(35), *agg.Value) + } +} + +func TestAggsMetricsSum(t *testing.T) { + s := `{ + "intraday_return": { + "value": 2.18 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Sum("intraday_return") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(2.18) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(2.18), *agg.Value) + } +} + +func TestAggsMetricsAvg(t *testing.T) { + s := `{ + "avg_grade": { + 
"value": 75 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Avg("avg_grade") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(75) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(75), *agg.Value) + } +} + +func TestAggsMetricsValueCount(t *testing.T) { + s := `{ + "grades_count": { + "value": 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ValueCount("grades_count") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(10) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value) + } +} + +func TestAggsMetricsCardinality(t *testing.T) { + s := `{ + "author_count": { + "value": 12 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Cardinality("author_count") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(12) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(12), *agg.Value) + } +} + +func TestAggsMetricsStats(t *testing.T) { + s := `{ + "grades_stats": { + "count": 6, + "min": 60, + "max": 98, + "avg": 78.5, + "sum": 471 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Stats("grades_stats") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Count != int64(6) { + t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count) + } + if agg.Min == nil { + t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min) + } + if *agg.Min != float64(60) { + t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min) + } + if agg.Max == nil { + t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max) + } + if *agg.Max != float64(98) { + t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max) + } + if agg.Avg == nil { + t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg) + } + if *agg.Avg != float64(78.5) { + t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg) + } + if agg.Sum == nil { + t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum) + } + if *agg.Sum != float64(471) { + t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum) + } +} + +func TestAggsMetricsExtendedStats(t *testing.T) { + s := `{ + "grades_stats": { + "count": 6, + "min": 72, + "max": 117.6, + "avg": 94.2, + "sum": 565.2, + "sum_of_squares": 
54551.51999999999, + "variance": 218.2799999999976, + "std_deviation": 14.774302013969987 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ExtendedStats("grades_stats") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Count != int64(6) { + t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count) + } + if agg.Min == nil { + t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min) + } + if *agg.Min != float64(72) { + t.Fatalf("expected aggregation Min = %v; got: %v", float64(72), *agg.Min) + } + if agg.Max == nil { + t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max) + } + if *agg.Max != float64(117.6) { + t.Fatalf("expected aggregation Max = %v; got: %v", float64(117.6), *agg.Max) + } + if agg.Avg == nil { + t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg) + } + if *agg.Avg != float64(94.2) { + t.Fatalf("expected aggregation Avg = %v; got: %v", float64(94.2), *agg.Avg) + } + if agg.Sum == nil { + t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum) + } + if *agg.Sum != float64(565.2) { + t.Fatalf("expected aggregation Sum = %v; got: %v", float64(565.2), *agg.Sum) + } + if agg.SumOfSquares == nil { + t.Fatalf("expected aggregation sum_of_squares != nil; got: %v", agg.SumOfSquares) + } + if *agg.SumOfSquares != float64(54551.51999999999) { + t.Fatalf("expected aggregation sum_of_squares = %v; got: %v", float64(54551.51999999999), *agg.SumOfSquares) + } + if agg.Variance == nil { + t.Fatalf("expected aggregation Variance != nil; got: %v", agg.Variance) + } + if *agg.Variance != float64(218.2799999999976) { + t.Fatalf("expected aggregation Variance = %v; got: %v", float64(218.2799999999976), *agg.Variance) + } + if agg.StdDeviation == nil { + t.Fatalf("expected aggregation StdDeviation != nil; got: %v", agg.StdDeviation) + } + if *agg.StdDeviation != float64(14.774302013969987) { + t.Fatalf("expected aggregation StdDeviation = %v; got: %v", float64(14.774302013969987), *agg.StdDeviation) + } +} + +func TestAggsMetricsPercentiles(t *testing.T) { + s := `{ + "load_time_outlier": { + "values" : { + "1.0": 15, + "5.0": 20, + "25.0": 23, + "50.0": 25, + "75.0": 29, + "95.0": 60, + "99.0": 150 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Percentiles("load_time_outlier") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Values == nil { + t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values) + } + if len(agg.Values) != 7 { + t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values)) + } + if agg.Values["1.0"] != float64(15) { + t.Errorf("expected aggregation value for \"1.0\" = %v; got: %v", float64(15), agg.Values["1.0"]) + } + if agg.Values["5.0"] != float64(20) { + t.Errorf("expected aggregation value for \"5.0\" = %v; got: %v", float64(20), agg.Values["5.0"]) + } + if agg.Values["25.0"] != float64(23) { + t.Errorf("expected aggregation value for \"25.0\" = %v; got: %v", float64(23), agg.Values["25.0"]) + } + if agg.Values["50.0"] != float64(25) { + t.Errorf("expected aggregation value for \"50.0\" = %v; got: %v", 
float64(25), agg.Values["50.0"])
+	}
+	if agg.Values["75.0"] != float64(29) {
+		t.Errorf("expected aggregation value for \"75.0\" = %v; got: %v", float64(29), agg.Values["75.0"])
+	}
+	if agg.Values["95.0"] != float64(60) {
+		t.Errorf("expected aggregation value for \"95.0\" = %v; got: %v", float64(60), agg.Values["95.0"])
+	}
+	if agg.Values["99.0"] != float64(150) {
+		t.Errorf("expected aggregation value for \"99.0\" = %v; got: %v", float64(150), agg.Values["99.0"])
+	}
+}
+
+func TestAggsMetricsPercentileRanks(t *testing.T) {
+	s := `{
+	"load_time_outlier": {
+		"values" : {
+			"15": 92,
+			"30": 100
+		}
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.PercentileRanks("load_time_outlier")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Values == nil {
+		t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
+	}
+	if len(agg.Values) != 2 {
+		t.Fatalf("expected %d aggregation Values; got: %d", 2, len(agg.Values))
+	}
+	if agg.Values["15"] != float64(92) {
+		t.Errorf("expected aggregation value for \"15\" = %v; got: %v", float64(92), agg.Values["15"])
+	}
+	if agg.Values["30"] != float64(100) {
+		t.Errorf("expected aggregation value for \"30\" = %v; got: %v", float64(100), agg.Values["30"])
+	}
+}
+
+func TestAggsMetricsTopHits(t *testing.T) {
+	s := `{
+	"top-tags": {
+		"buckets": [
+			{
+				"key": "windows-7",
+				"doc_count": 25365,
+				"top_tags_hits": {
+					"hits": {
+						"total": 25365,
+						"max_score": 1,
+						"hits": [
+							{
+								"_index": "stack",
+								"_type": "question",
+								"_id": "602679",
+								"_score": 1,
+								"_source": {
+									"title": "Windows port opening"
+								},
+								"sort": [
+									1370143231177
+								]
+							}
+						]
+					}
+				}
+			},
+			{
+				"key": "linux",
+				"doc_count": 18342,
+				"top_tags_hits": {
+					"hits": {
+						"total": 18342,
+						"max_score": 1,
+						"hits": [
+							{
+								"_index": "stack",
+								"_type": "question",
+								"_id": "602672",
+								"_score": 1,
+								"_source": {
+									"title": "Ubuntu RFID Screensaver lock-unlock"
+								},
+								"sort": [
+									1370143379747
+								]
+							}
+						]
+					}
+				}
+			},
+			{
+				"key": "windows",
+				"doc_count": 18119,
+				"top_tags_hits": {
+					"hits": {
+						"total": 18119,
+						"max_score": 1,
+						"hits": [
+							{
+								"_index": "stack",
+								"_type": "question",
+								"_id": "602678",
+								"_score": 1,
+								"_source": {
+									"title": "If I change my computers date / time, what could be affected?"
+ }, + "sort": [ + 1370142868283 + ] + } + ] + } + } + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("top-tags") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "windows-7" { + t.Errorf("expected bucket key = %q; got: %q", "windows-7", agg.Buckets[0].Key) + } + if agg.Buckets[1].Key != "linux" { + t.Errorf("expected bucket key = %q; got: %q", "linux", agg.Buckets[1].Key) + } + if agg.Buckets[2].Key != "windows" { + t.Errorf("expected bucket key = %q; got: %q", "windows", agg.Buckets[2].Key) + } + + // Sub-aggregation of top-hits + subAgg, found := agg.Buckets[0].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 25365 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 25365, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } + + subAgg, found = agg.Buckets[1].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 18342 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18342, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } + + subAgg, found = agg.Buckets[2].TopHits("top_tags_hits") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != nil; got: %v", subAgg) + } + if subAgg.Hits == nil { + t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits) + } + if subAgg.Hits.TotalHits != 18119 { + t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18119, subAgg.Hits.TotalHits) + } + if subAgg.Hits.MaxScore == nil { + t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore) + } + if *subAgg.Hits.MaxScore != float64(1.0) { + t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore) + } +} + +func TestAggsBucketGlobal(t *testing.T) { + s := `{ + "all_products" : { + "doc_count" : 100, + "avg_price" : { + "value" : 56.3 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + 
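+	// Editorial aside (hedged; added commentary): the unit tests from here on
+	// share one pattern: unmarshal a captured response fragment into
+	// Aggregations, fetch a typed view through an accessor such as
+	// aggs.Global(name), and navigate sub-aggregations from the returned
+	// value, for example:
+	//
+	//	global, _ := aggs.Global("all_products")
+	//	avg, _ := global.Avg("avg_price")
+	//
+	// exactly as the assertions below do.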
t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Global("all_products") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 100 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("avg_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(56.3) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value) + } +} + +func TestAggsBucketFilter(t *testing.T) { + s := `{ + "in_stock_products" : { + "doc_count" : 100, + "avg_price" : { "value" : 56.3 } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filter("in_stock_products") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 100 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("avg_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(56.3) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value) + } +} + +func TestAggsBucketFiltersWithBuckets(t *testing.T) { + s := `{ + "messages" : { + "buckets" : [ + { + "doc_count" : 34, + "monthly" : { + "buckets" : [] + } + }, + { + "doc_count" : 439, + "monthly" : { + "buckets" : [] + } + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filters("messages") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Fatalf("expected %d buckets; got: %d", 2, len(agg.Buckets)) + } + + if agg.Buckets[0].DocCount != 34 { + t.Fatalf("expected DocCount = %d; got: %d", 34, agg.Buckets[0].DocCount) + } + subAgg, found := agg.Buckets[0].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } + + if agg.Buckets[1].DocCount != 439 { + t.Fatalf("expected DocCount = %d; got: %d", 439, agg.Buckets[1].DocCount) + } + subAgg, found = agg.Buckets[1].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } +} + +func TestAggsBucketFiltersWithNamedBuckets(t *testing.T) { + s := `{ + "messages" : { + "buckets" : { + "errors" : 
{ + "doc_count" : 34, + "monthly" : { + "buckets" : [] + } + }, + "warnings" : { + "doc_count" : 439, + "monthly" : { + "buckets" : [] + } + } + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Filters("messages") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.NamedBuckets == nil { + t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.NamedBuckets) + } + if len(agg.NamedBuckets) != 2 { + t.Fatalf("expected %d buckets; got: %d", 2, len(agg.NamedBuckets)) + } + + if agg.NamedBuckets["errors"].DocCount != 34 { + t.Fatalf("expected DocCount = %d; got: %d", 34, agg.NamedBuckets["errors"].DocCount) + } + subAgg, found := agg.NamedBuckets["errors"].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } + + if agg.NamedBuckets["warnings"].DocCount != 439 { + t.Fatalf("expected DocCount = %d; got: %d", 439, agg.NamedBuckets["warnings"].DocCount) + } + subAgg, found = agg.NamedBuckets["warnings"].Histogram("monthly") + if !found { + t.Fatalf("expected sub aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) + } +} + +func TestAggsBucketMissing(t *testing.T) { + s := `{ + "products_without_a_price" : { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Missing("products_without_a_price") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketNested(t *testing.T) { + s := `{ + "resellers": { + "min_price": { + "value" : 350 + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Nested("resellers") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 0 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 0, agg.DocCount) + } + + // Sub-aggregation + subAgg, found := agg.Avg("min_price") + if !found { + t.Fatalf("expected sub-aggregation to be found; got: %v", found) + } + if subAgg == nil { + t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) + } + if subAgg.Value == nil { + t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) + } + if *subAgg.Value != float64(350) { + t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(350), *subAgg.Value) + } +} + +func TestAggsBucketReverseNested(t *testing.T) { + s := `{ + "comment_to_issue": { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.ReverseNested("comment_to_issue") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if 
agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketChildren(t *testing.T) { + s := `{ + "to-answers": { + "doc_count" : 10 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Children("to-answers") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 10 { + t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) + } +} + +func TestAggsBucketTerms(t *testing.T) { + s := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : "olivere", + "doc_count" : 2 + }, { + "key" : "sandrae", + "doc_count" : 1 + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("users") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "olivere" { + t.Errorf("expected key %q; got: %q", "olivere", agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != "sandrae" { + t.Errorf("expected key %q; got: %q", "sandrae", agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketTermsWithNumericKeys(t *testing.T) { + s := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : 17, + "doc_count" : 2 + }, { + "key" : 21, + "doc_count" : 1 + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("users") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != float64(17) { + t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key) + } + if got, err := agg.Buckets[0].KeyNumber.Int64(); err != nil { + t.Errorf("expected to convert key to int64; got: %v", err) + } else if got != 17 { + t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != float64(21) { + t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key) + } + if got, err := agg.Buckets[1].KeyNumber.Int64(); err != nil { + t.Errorf("expected to convert key to int64; got: %v", err) + } else if got != 21 { + t.Errorf("expected key %v; got: %v", 21, 
agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketTermsWithBoolKeys(t *testing.T) { + s := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : true, + "doc_count" : 2 + }, { + "key" : false, + "doc_count" : 1 + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Terms("users") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != true { + t.Errorf("expected key %v; got: %v", true, agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != false { + t.Errorf("expected key %v; got: %v", false, agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketSignificantTerms(t *testing.T) { + s := `{ + "significantCrimeTypes" : { + "doc_count": 47347, + "buckets" : [ + { + "key": "Bicycle theft", + "doc_count": 3640, + "score": 0.371235374214817, + "bg_count": 66799 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.SignificantTerms("significantCrimeTypes") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 47347 { + t.Fatalf("expected aggregation DocCount != %d; got: %d", 47347, agg.DocCount) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 1 { + t.Errorf("expected %d bucket entries; got: %d", 1, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "Bicycle theft" { + t.Errorf("expected key = %q; got: %q", "Bicycle theft", agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 3640 { + t.Errorf("expected doc count = %d; got: %d", 3640, agg.Buckets[0].DocCount) + } + if agg.Buckets[0].Score != float64(0.371235374214817) { + t.Errorf("expected score = %v; got: %v", float64(0.371235374214817), agg.Buckets[0].Score) + } + if agg.Buckets[0].BgCount != 66799 { + t.Errorf("expected BgCount = %d; got: %d", 66799, agg.Buckets[0].BgCount) + } +} + +func TestAggsBucketSampler(t *testing.T) { + s := `{ + "sample" : { + "doc_count": 1000, + "keywords": { + "doc_count": 1000, + "buckets" : [ + { + "key": "bend", + "doc_count": 58, + "score": 37.982536582524276, + "bg_count": 103 + } + ] + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Sampler("sample") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.DocCount != 1000 { + t.Fatalf("expected aggregation DocCount != %d; got: %d", 1000, 
agg.DocCount)
+	}
+	sub, found := agg.Aggregations["keywords"]
+	if !found {
+		t.Fatalf("expected sub aggregation %q", "keywords")
+	}
+	if sub == nil {
+		t.Fatalf("expected sub aggregation %q; got: %v", "keywords", sub)
+	}
+}
+
+func TestAggsBucketRange(t *testing.T) {
+	s := `{
+	"price_ranges" : {
+		"buckets": [
+			{
+				"to": 50,
+				"doc_count": 2
+			},
+			{
+				"from": 50,
+				"to": 100,
+				"doc_count": 4
+			},
+			{
+				"from": 100,
+				"doc_count": 4
+			}
+		]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.Range("price_ranges")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Buckets == nil {
+		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+	}
+	if len(agg.Buckets) != 3 {
+		t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+	}
+	if agg.Buckets[0].From != nil {
+		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+	}
+	if agg.Buckets[0].To == nil {
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+	}
+	if *agg.Buckets[0].To != float64(50) {
+		t.Errorf("expected To = %v; got: %v", float64(50), *agg.Buckets[0].To)
+	}
+	if agg.Buckets[0].DocCount != 2 {
+		t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[0].DocCount)
+	}
+	if agg.Buckets[1].From == nil {
+		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+	}
+	if *agg.Buckets[1].From != float64(50) {
+		t.Errorf("expected From = %v; got: %v", float64(50), *agg.Buckets[1].From)
+	}
+	if agg.Buckets[1].To == nil {
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
+	}
+	if *agg.Buckets[1].To != float64(100) {
+		t.Errorf("expected To = %v; got: %v", float64(100), *agg.Buckets[1].To)
+	}
+	if agg.Buckets[1].DocCount != 4 {
+		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[1].DocCount)
+	}
+	if agg.Buckets[2].From == nil {
+		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
+	}
+	if *agg.Buckets[2].From != float64(100) {
+		t.Errorf("expected From = %v; got: %v", float64(100), *agg.Buckets[2].From)
+	}
+	if agg.Buckets[2].To != nil {
+		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
+	}
+	if agg.Buckets[2].DocCount != 4 {
+		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[2].DocCount)
+	}
+}
+
+func TestAggsBucketDateRange(t *testing.T) {
+	s := `{
+	"range": {
+		"buckets": [
+			{
+				"to": 1.3437792E+12,
+				"to_as_string": "08-2012",
+				"doc_count": 7
+			},
+			{
+				"from": 1.3437792E+12,
+				"from_as_string": "08-2012",
+				"doc_count": 2
+			}
+		]
+	}
+}`
+
+	aggs := new(Aggregations)
+	err := json.Unmarshal([]byte(s), &aggs)
+	if err != nil {
+		t.Fatalf("expected no error decoding; got: %v", err)
+	}
+
+	agg, found := aggs.DateRange("range")
+	if !found {
+		t.Fatalf("expected aggregation to be found; got: %v", found)
+	}
+	if agg == nil {
+		t.Fatalf("expected aggregation != nil; got: %v", agg)
+	}
+	if agg.Buckets == nil {
+		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+	}
+	if len(agg.Buckets) != 2 {
+		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+	}
+	if agg.Buckets[0].From != nil {
+		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+	}
+	if agg.Buckets[0].To == nil {
+		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+	}
+	if *agg.Buckets[0].To != float64(1.3437792E+12) {
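+	// Editorial note (hedged; added commentary): JSON numbers unmarshal into
+	// float64, so the epoch-millisecond boundary 1.3437792E+12 (the "08-2012"
+	// edge) is compared as a float64 rather than as an integer timestamp.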
t.Errorf("expected To = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[0].To) + } + if agg.Buckets[0].ToAsString != "08-2012" { + t.Errorf("expected ToAsString = %q; got: %q", "08-2012", agg.Buckets[0].ToAsString) + } + if agg.Buckets[0].DocCount != 7 { + t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(1.3437792E+12) { + t.Errorf("expected From = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[1].From) + } + if agg.Buckets[1].FromAsString != "08-2012" { + t.Errorf("expected FromAsString = %q; got: %q", "08-2012", agg.Buckets[1].FromAsString) + } + if agg.Buckets[1].To != nil { + t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To) + } + if agg.Buckets[1].DocCount != 2 { + t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketIPv4Range(t *testing.T) { + s := `{ + "ip_ranges": { + "buckets" : [ + { + "to": 167772165, + "to_as_string": "10.0.0.5", + "doc_count": 4 + }, + { + "from": 167772165, + "from_as_string": "10.0.0.5", + "doc_count": 6 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.IPv4Range("ip_ranges") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].From != nil { + t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) + } + if agg.Buckets[0].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) + } + if *agg.Buckets[0].To != float64(167772165) { + t.Errorf("expected To = %v; got: %v", float64(167772165), *agg.Buckets[0].To) + } + if agg.Buckets[0].ToAsString != "10.0.0.5" { + t.Errorf("expected ToAsString = %q; got: %q", "10.0.0.5", agg.Buckets[0].ToAsString) + } + if agg.Buckets[0].DocCount != 4 { + t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(167772165) { + t.Errorf("expected From = %v; got: %v", float64(167772165), *agg.Buckets[1].From) + } + if agg.Buckets[1].FromAsString != "10.0.0.5" { + t.Errorf("expected FromAsString = %q; got: %q", "10.0.0.5", agg.Buckets[1].FromAsString) + } + if agg.Buckets[1].To != nil { + t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To) + } + if agg.Buckets[1].DocCount != 6 { + t.Errorf("expected DocCount = %d; got: %d", 6, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketHistogram(t *testing.T) { + s := `{ + "prices" : { + "buckets": [ + { + "key": 0, + "doc_count": 2 + }, + { + "key": 50, + "doc_count": 4 + }, + { + "key": 150, + "doc_count": 3 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Histogram("prices") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { 
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d buckets; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].Key != 0 { + t.Errorf("expected key = %v; got: %v", 0, agg.Buckets[0].Key) + } + if agg.Buckets[0].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[0].KeyAsString) + } + if agg.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count = %d; got: %d", 2, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != 50 { + t.Errorf("expected key = %v; got: %v", 50, agg.Buckets[1].Key) + } + if agg.Buckets[1].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[1].KeyAsString) + } + if agg.Buckets[1].DocCount != 4 { + t.Errorf("expected doc count = %d; got: %d", 4, agg.Buckets[1].DocCount) + } + if agg.Buckets[2].Key != 150 { + t.Errorf("expected key = %v; got: %v", 150, agg.Buckets[2].Key) + } + if agg.Buckets[2].KeyAsString != nil { + t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[2].KeyAsString) + } + if agg.Buckets[2].DocCount != 3 { + t.Errorf("expected doc count = %d; got: %d", 3, agg.Buckets[2].DocCount) + } +} + +func TestAggsBucketDateHistogram(t *testing.T) { + s := `{ + "articles_over_time": { + "buckets": [ + { + "key_as_string": "2013-02-02", + "key": 1328140800000, + "doc_count": 1 + }, + { + "key_as_string": "2013-03-02", + "key": 1330646400000, + "doc_count": 2 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.DateHistogram("articles_over_time") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != 1328140800000 { + t.Errorf("expected key %v; got: %v", 1328140800000, agg.Buckets[0].Key) + } + if agg.Buckets[0].KeyAsString == nil { + t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[0].KeyAsString) + } + if *agg.Buckets[0].KeyAsString != "2013-02-02" { + t.Errorf("expected key_as_string %q; got: %q", "2013-02-02", *agg.Buckets[0].KeyAsString) + } + if agg.Buckets[0].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != 1330646400000 { + t.Errorf("expected key %v; got: %v", 1330646400000, agg.Buckets[1].Key) + } + if agg.Buckets[1].KeyAsString == nil { + t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[1].KeyAsString) + } + if *agg.Buckets[1].KeyAsString != "2013-03-02" { + t.Errorf("expected key_as_string %q; got: %q", "2013-03-02", *agg.Buckets[1].KeyAsString) + } + if agg.Buckets[1].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[1].DocCount) + } +} + +func TestAggsMetricsGeoBounds(t *testing.T) { + s := `{ + "viewport": { + "bounds": { + "top_left": { + "lat": 80.45, + "lon": -160.22 + }, + "bottom_right": { + "lat": 40.65, + "lon": 42.57 + } + } + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.GeoBounds("viewport") + if !found { + t.Fatalf("expected aggregation to be found; got: 
%v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Bounds.TopLeft.Latitude != float64(80.45) { + t.Fatalf("expected Bounds.TopLeft.Latitude != %v; got: %v", float64(80.45), agg.Bounds.TopLeft.Latitude) + } + if agg.Bounds.TopLeft.Longitude != float64(-160.22) { + t.Fatalf("expected Bounds.TopLeft.Longitude != %v; got: %v", float64(-160.22), agg.Bounds.TopLeft.Longitude) + } + if agg.Bounds.BottomRight.Latitude != float64(40.65) { + t.Fatalf("expected Bounds.BottomRight.Latitude != %v; got: %v", float64(40.65), agg.Bounds.BottomRight.Latitude) + } + if agg.Bounds.BottomRight.Longitude != float64(42.57) { + t.Fatalf("expected Bounds.BottomRight.Longitude != %v; got: %v", float64(42.57), agg.Bounds.BottomRight.Longitude) + } +} + +func TestAggsBucketGeoHash(t *testing.T) { + s := `{ + "myLarge-GrainGeoHashGrid": { + "buckets": [ + { + "key": "svz", + "doc_count": 10964 + }, + { + "key": "sv8", + "doc_count": 3198 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.GeoHash("myLarge-GrainGeoHashGrid") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) + } + if agg.Buckets[0].Key != "svz" { + t.Errorf("expected key %q; got: %q", "svz", agg.Buckets[0].Key) + } + if agg.Buckets[0].DocCount != 10964 { + t.Errorf("expected doc count %d; got: %d", 10964, agg.Buckets[0].DocCount) + } + if agg.Buckets[1].Key != "sv8" { + t.Errorf("expected key %q; got: %q", "sv8", agg.Buckets[1].Key) + } + if agg.Buckets[1].DocCount != 3198 { + t.Errorf("expected doc count %d; got: %d", 3198, agg.Buckets[1].DocCount) + } +} + +func TestAggsBucketGeoDistance(t *testing.T) { + s := `{ + "rings" : { + "buckets": [ + { + "unit": "km", + "to": 100.0, + "doc_count": 3 + }, + { + "unit": "km", + "from": 100.0, + "to": 300.0, + "doc_count": 1 + }, + { + "unit": "km", + "from": 300.0, + "doc_count": 7 + } + ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.GeoDistance("rings") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Buckets == nil { + t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) + } + if len(agg.Buckets) != 3 { + t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets)) + } + if agg.Buckets[0].From != nil { + t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) + } + if agg.Buckets[0].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) + } + if *agg.Buckets[0].To != float64(100.0) { + t.Errorf("expected To = %v; got: %v", float64(100.0), *agg.Buckets[0].To) + } + if agg.Buckets[0].DocCount != 3 { + t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount) + } + + if agg.Buckets[1].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) + } + if *agg.Buckets[1].From != float64(100.0) { + t.Errorf("expected From = %v; got: %v", float64(100.0), *agg.Buckets[1].From) + } + if 
agg.Buckets[1].To == nil { + t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To) + } + if *agg.Buckets[1].To != float64(300.0) { + t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[1].To) + } + if agg.Buckets[1].DocCount != 1 { + t.Errorf("expected DocCount = %d; got: %d", 1, agg.Buckets[1].DocCount) + } + + if agg.Buckets[2].From == nil { + t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From) + } + if *agg.Buckets[2].From != float64(300.0) { + t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[2].From) + } + if agg.Buckets[2].To != nil { + t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To) + } + if agg.Buckets[2].DocCount != 7 { + t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[2].DocCount) + } +} + +func TestAggsSubAggregates(t *testing.T) { + rs := `{ + "users" : { + "doc_count_error_upper_bound" : 1, + "sum_other_doc_count" : 2, + "buckets" : [ { + "key" : "olivere", + "doc_count" : 2, + "ts" : { + "buckets" : [ { + "key_as_string" : "2012-01-01T00:00:00.000Z", + "key" : 1325376000000, + "doc_count" : 2 + } ] + } + }, { + "key" : "sandrae", + "doc_count" : 1, + "ts" : { + "buckets" : [ { + "key_as_string" : "2011-01-01T00:00:00.000Z", + "key" : 1293840000000, + "doc_count" : 1 + } ] + } + } ] + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(rs), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + // Access top-level aggregation + users, found := aggs.Terms("users") + if !found { + t.Fatalf("expected users aggregation to be found; got: %v", found) + } + if users == nil { + t.Fatalf("expected users aggregation; got: %v", users) + } + if users.Buckets == nil { + t.Fatalf("expected users buckets; got: %v", users.Buckets) + } + if len(users.Buckets) != 2 { + t.Errorf("expected %d bucket entries; got: %d", 2, len(users.Buckets)) + } + if users.Buckets[0].Key != "olivere" { + t.Errorf("expected key %q; got: %q", "olivere", users.Buckets[0].Key) + } + if users.Buckets[0].DocCount != 2 { + t.Errorf("expected doc count %d; got: %d", 2, users.Buckets[0].DocCount) + } + if users.Buckets[1].Key != "sandrae" { + t.Errorf("expected key %q; got: %q", "sandrae", users.Buckets[1].Key) + } + if users.Buckets[1].DocCount != 1 { + t.Errorf("expected doc count %d; got: %d", 1, users.Buckets[1].DocCount) + } + + // Access sub-aggregation + ts, found := users.Buckets[0].DateHistogram("ts") + if !found { + t.Fatalf("expected ts aggregation to be found; got: %v", found) + } + if ts == nil { + t.Fatalf("expected ts aggregation; got: %v", ts) + } + if ts.Buckets == nil { + t.Fatalf("expected ts buckets; got: %v", ts.Buckets) + } + if len(ts.Buckets) != 1 { + t.Errorf("expected %d bucket entries; got: %d", 1, len(ts.Buckets)) + } + if ts.Buckets[0].Key != 1325376000000 { + t.Errorf("expected key %v; got: %v", 1325376000000, ts.Buckets[0].Key) + } + if ts.Buckets[0].KeyAsString == nil { + t.Fatalf("expected key_as_string != %v; got: %v", nil, ts.Buckets[0].KeyAsString) + } + if *ts.Buckets[0].KeyAsString != "2012-01-01T00:00:00.000Z" { + t.Errorf("expected key_as_string %q; got: %q", "2012-01-01T00:00:00.000Z", *ts.Buckets[0].KeyAsString) + } +} + +func TestAggsPipelineAvgBucket(t *testing.T) { + s := `{ + "avg_monthly_sales" : { + "value" : 328.33333333333333 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.AvgBucket("avg_monthly_sales") + if 
!found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(328.33333333333333) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(328.33333333333333), *agg.Value) + } +} + +func TestAggsPipelineSumBucket(t *testing.T) { + s := `{ + "sum_monthly_sales" : { + "value" : 985 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.SumBucket("sum_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(985) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(985), *agg.Value) + } +} + +func TestAggsPipelineMaxBucket(t *testing.T) { + s := `{ + "max_monthly_sales" : { + "keys": ["2015/01/01 00:00:00"], + "value" : 550 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MaxBucket("max_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if len(agg.Keys) != 1 { + t.Fatalf("expected 1 key; got: %d", len(agg.Keys)) + } + if got, want := agg.Keys[0], "2015/01/01 00:00:00"; got != want { + t.Fatalf("expected key %q; got: %v (%T)", want, got, got) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(550) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value) + } +} + +func TestAggsPipelineMinBucket(t *testing.T) { + s := `{ + "min_monthly_sales" : { + "keys": ["2015/02/01 00:00:00"], + "value" : 60 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MinBucket("min_monthly_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if len(agg.Keys) != 1 { + t.Fatalf("expected 1 key; got: %d", len(agg.Keys)) + } + if got, want := agg.Keys[0], "2015/02/01 00:00:00"; got != want { + t.Fatalf("expected key %q; got: %v (%T)", want, got, got) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(60) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(60), *agg.Value) + } +} + +func TestAggsPipelineMovAvg(t *testing.T) { + s := `{ + "the_movavg" : { + "value" : 12.0 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.MovAvg("the_movavg") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(12.0) { + 
t.Fatalf("expected aggregation value = %v; got: %v", float64(12.0), *agg.Value) + } +} + +func TestAggsPipelineDerivative(t *testing.T) { + s := `{ + "sales_deriv" : { + "value" : 315 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.Derivative("sales_deriv") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(315) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(315), *agg.Value) + } +} + +func TestAggsPipelineCumulativeSum(t *testing.T) { + s := `{ + "cumulative_sales" : { + "value" : 550 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.CumulativeSum("cumulative_sales") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(550) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value) + } +} + +func TestAggsPipelineBucketScript(t *testing.T) { + s := `{ + "t-shirt-percentage" : { + "value" : 20 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.BucketScript("t-shirt-percentage") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(20) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value) + } +} + +func TestAggsPipelineSerialDiff(t *testing.T) { + s := `{ + "the_diff" : { + "value" : -722.0 + } +}` + + aggs := new(Aggregations) + err := json.Unmarshal([]byte(s), &aggs) + if err != nil { + t.Fatalf("expected no error decoding; got: %v", err) + } + + agg, found := aggs.SerialDiff("the_diff") + if !found { + t.Fatalf("expected aggregation to be found; got: %v", found) + } + if agg == nil { + t.Fatalf("expected aggregation != nil; got: %v", agg) + } + if agg.Value == nil { + t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) + } + if *agg.Value != float64(-722.0) { + t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_bool.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_bool.go new file mode 100644 index 000000000..c2cc8697b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_bool.go @@ -0,0 +1,212 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "fmt" + +// A bool query matches documents matching boolean +// combinations of other queries. 
+// For more details, see: +// http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html +type BoolQuery struct { + Query + mustClauses []Query + mustNotClauses []Query + filterClauses []Query + shouldClauses []Query + boost *float64 + disableCoord *bool + minimumShouldMatch string + adjustPureNegative *bool + queryName string +} + +// Creates a new bool query. +func NewBoolQuery() *BoolQuery { + return &BoolQuery{ + mustClauses: make([]Query, 0), + mustNotClauses: make([]Query, 0), + filterClauses: make([]Query, 0), + shouldClauses: make([]Query, 0), + } +} + +func (q *BoolQuery) Must(queries ...Query) *BoolQuery { + q.mustClauses = append(q.mustClauses, queries...) + return q +} + +func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery { + q.mustNotClauses = append(q.mustNotClauses, queries...) + return q +} + +func (q *BoolQuery) Filter(filters ...Query) *BoolQuery { + q.filterClauses = append(q.filterClauses, filters...) + return q +} + +func (q *BoolQuery) Should(queries ...Query) *BoolQuery { + q.shouldClauses = append(q.shouldClauses, queries...) + return q +} + +func (q *BoolQuery) Boost(boost float64) *BoolQuery { + q.boost = &boost + return q +} + +func (q *BoolQuery) DisableCoord(disableCoord bool) *BoolQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery { + q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch) + return q +} + +func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery { + q.adjustPureNegative = &adjustPureNegative + return q +} + +func (q *BoolQuery) QueryName(queryName string) *BoolQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the bool query. +func (q *BoolQuery) Source() (interface{}, error) { + // { + // "bool" : { + // "must" : { + // "term" : { "user" : "kimchy" } + // }, + // "must_not" : { + // "range" : { + // "age" : { "from" : 10, "to" : 20 } + // } + // }, + // "filter" : [ + // ... 
+ // ] + // "should" : [ + // { + // "term" : { "tag" : "wow" } + // }, + // { + // "term" : { "tag" : "elasticsearch" } + // } + // ], + // "minimum_number_should_match" : 1, + // "boost" : 1.0 + // } + // } + + query := make(map[string]interface{}) + + boolClause := make(map[string]interface{}) + query["bool"] = boolClause + + // must + if len(q.mustClauses) == 1 { + src, err := q.mustClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["must"] = src + } else if len(q.mustClauses) > 1 { + clauses := make([]interface{}, 0) + for _, subQuery := range q.mustClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["must"] = clauses + } + + // must_not + if len(q.mustNotClauses) == 1 { + src, err := q.mustNotClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["must_not"] = src + } else if len(q.mustNotClauses) > 1 { + clauses := make([]interface{}, 0) + for _, subQuery := range q.mustNotClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["must_not"] = clauses + } + + // filter + if len(q.filterClauses) == 1 { + src, err := q.filterClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["filter"] = src + } else if len(q.filterClauses) > 1 { + clauses := make([]interface{}, 0) + for _, subQuery := range q.filterClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["filter"] = clauses + } + + // should + if len(q.shouldClauses) == 1 { + src, err := q.shouldClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["should"] = src + } else if len(q.shouldClauses) > 1 { + clauses := make([]interface{}, 0) + for _, subQuery := range q.shouldClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["should"] = clauses + } + + if q.boost != nil { + boolClause["boost"] = *q.boost + } + if q.disableCoord != nil { + boolClause["disable_coord"] = *q.disableCoord + } + if q.minimumShouldMatch != "" { + boolClause["minimum_should_match"] = q.minimumShouldMatch + } + if q.adjustPureNegative != nil { + boolClause["adjust_pure_negative"] = *q.adjustPureNegative + } + if q.queryName != "" { + boolClause["_name"] = q.queryName + } + + return query, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_bool_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_bool_test.go new file mode 100644 index 000000000..327d3f635 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_bool_test.go @@ -0,0 +1,34 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBoolQuery(t *testing.T) { + q := NewBoolQuery() + q = q.Must(NewTermQuery("tag", "wow")) + q = q.MustNot(NewRangeQuery("age").From(10).To(20)) + q = q.Filter(NewTermQuery("account", "1")) + q = q.Should(NewTermQuery("tag", "sometag"), NewTermQuery("tag", "sometagtag")) + q = q.Boost(10) + q = q.DisableCoord(true) + q = q.QueryName("Test") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"bool":{"_name":"Test","boost":10,"disable_coord":true,"filter":{"term":{"account":"1"}},"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_boosting.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_boosting.go new file mode 100644 index 000000000..7f7a53b8b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_boosting.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// A boosting query can be used to effectively +// demote results that match a given query. +// For more details, see: +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-boosting-query.html +type BoostingQuery struct { + Query + positiveClause Query + negativeClause Query + negativeBoost *float64 + boost *float64 +} + +// Creates a new boosting query. +func NewBoostingQuery() *BoostingQuery { + return &BoostingQuery{} +} + +func (q *BoostingQuery) Positive(positive Query) *BoostingQuery { + q.positiveClause = positive + return q +} + +func (q *BoostingQuery) Negative(negative Query) *BoostingQuery { + q.negativeClause = negative + return q +} + +func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery { + q.negativeBoost = &negativeBoost + return q +} + +func (q *BoostingQuery) Boost(boost float64) *BoostingQuery { + q.boost = &boost + return q +} + +// Creates the query source for the boosting query. +func (q *BoostingQuery) Source() (interface{}, error) { + // { + // "boosting" : { + // "positive" : { + // "term" : { + // "field1" : "value1" + // } + // }, + // "negative" : { + // "term" : { + // "field2" : "value2" + // } + // }, + // "negative_boost" : 0.2 + // } + // } + + query := make(map[string]interface{}) + + boostingClause := make(map[string]interface{}) + query["boosting"] = boostingClause + + // Negative and positive clause as well as negative boost + // are mandatory in the Java client. 
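+ // Here, by contrast, each clause below is serialized only if it was
+ // actually set, and validation of missing clauses is left to Elasticsearch.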
+ + // positive + if q.positiveClause != nil { + src, err := q.positiveClause.Source() + if err != nil { + return nil, err + } + boostingClause["positive"] = src + } + + // negative + if q.negativeClause != nil { + src, err := q.negativeClause.Source() + if err != nil { + return nil, err + } + boostingClause["negative"] = src + } + + if q.negativeBoost != nil { + boostingClause["negative_boost"] = *q.negativeBoost + } + + if q.boost != nil { + boostingClause["boost"] = *q.boost + } + + return query, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_boosting_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_boosting_test.go new file mode 100644 index 000000000..0ef03dfef --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_boosting_test.go @@ -0,0 +1,30 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestBoostingQuery(t *testing.T) { + q := NewBoostingQuery() + q = q.Positive(NewTermQuery("tag", "wow")) + q = q.Negative(NewRangeQuery("age").From(10).To(20)) + q = q.NegativeBoost(0.2) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"boosting":{"negative":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"negative_boost":0.2,"positive":{"term":{"tag":"wow"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go new file mode 100644 index 000000000..d45825067 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_common_terms.go @@ -0,0 +1,146 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CommonTermsQuery is a modern alternative to stopwords +// which improves the precision and recall of search results +// (by taking stopwords into account), without sacrificing performance. +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-common-terms-query.html +type CommonTermsQuery struct { + Query + name string + text interface{} + cutoffFreq *float64 + highFreq *float64 + highFreqOp string + highFreqMinimumShouldMatch string + lowFreq *float64 + lowFreqOp string + lowFreqMinimumShouldMatch string + analyzer string + boost *float64 + disableCoord *bool + queryName string +} + +// NewCommonTermsQuery creates and initializes a new common terms query. 
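+//
+// A hedged usage sketch (the "message" field, the "tweets" index, and the
+// client value are assumptions for illustration; see the accompanying tests):
+//
+//   q := NewCommonTermsQuery("message", "this is bonsai cool").CutoffFrequency(0.001)
+//   res, err := client.Search().Index("tweets").Query(q).Do()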
+func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery { + return &CommonTermsQuery{name: name, text: text} +} + +func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery { + q.cutoffFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery { + q.highFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery { + q.highFreqOp = op + return q +} + +func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.highFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery { + q.lowFreq = &f + return q +} + +func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery { + q.lowFreqOp = op + return q +} + +func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.lowFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery { + q.analyzer = analyzer + return q +} + +func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery { + q.boost = &boost + return q +} + +func (q *CommonTermsQuery) DisableCoord(disableCoord bool) *CommonTermsQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the common query. +func (q *CommonTermsQuery) Source() (interface{}, error) { + // { + // "common": { + // "body": { + // "query": "this is bonsai cool", + // "cutoff_frequency": 0.001 + // } + // } + // } + source := make(map[string]interface{}) + body := make(map[string]interface{}) + query := make(map[string]interface{}) + + source["common"] = body + body[q.name] = query + query["query"] = q.text + + if q.cutoffFreq != nil { + query["cutoff_frequency"] = *q.cutoffFreq + } + if q.highFreq != nil { + query["high_freq"] = *q.highFreq + } + if q.highFreqOp != "" { + query["high_freq_operator"] = q.highFreqOp + } + if q.lowFreq != nil { + query["low_freq"] = *q.lowFreq + } + if q.lowFreqOp != "" { + query["low_freq_operator"] = q.lowFreqOp + } + if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" { + mm := make(map[string]interface{}) + if q.lowFreqMinimumShouldMatch != "" { + mm["low_freq"] = q.lowFreqMinimumShouldMatch + } + if q.highFreqMinimumShouldMatch != "" { + mm["high_freq"] = q.highFreqMinimumShouldMatch + } + query["minimum_should_match"] = mm + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.disableCoord != nil { + query["disable_coord"] = *q.disableCoord + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_common_terms_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_common_terms_test.go new file mode 100644 index 000000000..02c1c2b60 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_common_terms_test.go @@ -0,0 +1,84 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestCommonTermsQuery(t *testing.T) {
+ q := NewCommonTermsQuery("message", "Golang").CutoffFrequency(0.001)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"common":{"message":{"cutoff_frequency":0.001,"query":"Golang"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchQueriesCommonTermsQuery(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Common terms query
+ q := NewCommonTermsQuery("message", "Golang")
+ searchResult, err := client.Search().Index(testIndexName).Query(q).Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go
new file mode 100644
index 000000000..c754d279d
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_constant_score.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ConstantScoreQuery is a query that wraps a filter and simply returns
+// a constant score equal to the query boost for every document in the filter.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-constant-score-query.html
+type ConstantScoreQuery struct {
+ filter Query
+ boost *float64
+}
+
+// NewConstantScoreQuery creates and initializes a new constant score query.
+func NewConstantScoreQuery(filter Query) *ConstantScoreQuery {
+ return &ConstantScoreQuery{
+ filter: filter,
+ }
+}
+
+// Boost sets the boost for this query. Documents matching this query
+// will (in addition to the normal weightings) have their score multiplied
+// by the boost provided. 
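+//
+// For example (the term query and values are illustrative):
+//
+//   q := NewConstantScoreQuery(NewTermQuery("user", "kimchy")).Boost(1.2)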
+func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery { + q.boost = &boost + return q +} + +// Source returns the query source. +func (q *ConstantScoreQuery) Source() (interface{}, error) { + // "constant_score" : { + // "filter" : { + // .... + // }, + // "boost" : 1.5 + // } + + query := make(map[string]interface{}) + + params := make(map[string]interface{}) + query["constant_score"] = params + + // filter + src, err := q.filter.Source() + if err != nil { + return nil, err + } + params["filter"] = src + + // boost + if q.boost != nil { + params["boost"] = *q.boost + } + + return query, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_constant_score_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_constant_score_test.go new file mode 100644 index 000000000..bdcce659c --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_constant_score_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestConstantScoreQuery(t *testing.T) { + q := NewConstantScoreQuery(NewTermQuery("user", "kimchy")).Boost(1.2) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"constant_score":{"boost":1.2,"filter":{"term":{"user":"kimchy"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go new file mode 100644 index 000000000..c47d6bb12 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_dis_max.go @@ -0,0 +1,104 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DisMaxQuery is a query that generates the union of documents produced by +// its subqueries, and that scores each document with the maximum score +// for that document as produced by any subquery, plus a tie breaking +// increment for any additional matching subqueries. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-dis-max-query.html +type DisMaxQuery struct { + queries []Query + boost *float64 + tieBreaker *float64 + queryName string +} + +// NewDisMaxQuery creates and initializes a new dis max query. +func NewDisMaxQuery() *DisMaxQuery { + return &DisMaxQuery{ + queries: make([]Query, 0), + } +} + +// Query adds one or more queries to the dis max query. +func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery { + q.queries = append(q.queries, queries...) + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. +func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery { + q.boost = &boost + return q +} + +// TieBreaker is the factor by which the score of each non-maximum disjunct +// for a document is multiplied with and added into the final score. 
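+//
+// In other words, the final document score is roughly
+//
+//   max(subquery scores) + tieBreaker * sum(scores of the other matching subqueries)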
+//
+// If non-zero, the value should be small, on the order of 0.1, which says
+// that 10 occurrences of word in a lower-scored field that is also in a
+// higher scored field is just as good as a unique word in the lower scored
+// field (i.e., one that is not in any higher scored field).
+func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched filters per hit.
+func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns the JSON serializable content for this query.
+func (q *DisMaxQuery) Source() (interface{}, error) {
+ // {
+ // "dis_max" : {
+ // "tie_breaker" : 0.7,
+ // "boost" : 1.2,
+ // "queries" : [
+ // {
+ // "term" : { "age" : 34 }
+ // },
+ // {
+ // "term" : { "age" : 35 }
+ // }
+ // ]
+ // }
+ // }
+
+ query := make(map[string]interface{})
+ params := make(map[string]interface{})
+ query["dis_max"] = params
+
+ if q.tieBreaker != nil {
+ params["tie_breaker"] = *q.tieBreaker
+ }
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ // queries
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.queries {
+ src, err := subQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ clauses = append(clauses, src)
+ }
+ params["queries"] = clauses
+
+ return query, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_dis_max_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_dis_max_test.go
new file mode 100644
index 000000000..8b005a61e
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_dis_max_test.go
@@ -0,0 +1,28 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestDisMaxQuery(t *testing.T) {
+ q := NewDisMaxQuery()
+ q = q.Query(NewTermQuery("age", 34), NewTermQuery("age", 35)).Boost(1.2).TieBreaker(0.7)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"dis_max":{"boost":1.2,"queries":[{"term":{"age":34}},{"term":{"age":35}}],"tie_breaker":0.7}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_exists.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_exists.go
new file mode 100644
index 000000000..e117673bd
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_exists.go
@@ -0,0 +1,49 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExistsQuery is a query that matches only documents that have
+// a value for the specified field.
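+//
+// For example, NewExistsQuery("user") matches only documents in which
+// the "user" field is present and has a non-null value.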
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-exists-query.html
+type ExistsQuery struct {
+ name string
+ queryName string
+}
+
+// NewExistsQuery creates and initializes a new exists query.
+func NewExistsQuery(name string) *ExistsQuery {
+ return &ExistsQuery{
+ name: name,
+ }
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched queries per hit.
+func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns the JSON serializable content for this query.
+func (q *ExistsQuery) Source() (interface{}, error) {
+ // {
+ // "exists" : {
+ // "field" : "user"
+ // }
+ // }
+
+ query := make(map[string]interface{})
+ params := make(map[string]interface{})
+ query["exists"] = params
+
+ params["field"] = q.name
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ return query, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_exists_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_exists_test.go
new file mode 100644
index 000000000..a1112085c
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_exists_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestExistsQuery(t *testing.T) {
+ q := NewExistsQuery("user")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"exists":{"field":"user"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq.go
new file mode 100644
index 000000000..b7fa15e67
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq.go
@@ -0,0 +1,172 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FunctionScoreQuery allows you to modify the score of documents that
+// are retrieved by a query. This can be useful if, for example,
+// a score function is computationally expensive and it is sufficient
+// to compute the score on a filtered set of documents.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+type FunctionScoreQuery struct {
+ query Query
+ filter Query
+ boost *float64
+ maxBoost *float64
+ scoreMode string
+ boostMode string
+ filters []Query
+ scoreFuncs []ScoreFunction
+ minScore *float64
+ weight *float64
+}
+
+// NewFunctionScoreQuery creates and initializes a new function score query.
+func NewFunctionScoreQuery() *FunctionScoreQuery {
+ return &FunctionScoreQuery{
+ filters: make([]Query, 0),
+ scoreFuncs: make([]ScoreFunction, 0),
+ }
+}
+
+// Query sets the query for the function score query. 
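+// Note that Query and Filter are mutually exclusive: setting one clears
+// the other, and Source serializes whichever of the two is currently set.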
+func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery { + q.query = query + q.filter = nil + return q +} + +// Filter sets the filter for the function score query. +func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery { + q.query = nil + q.filter = filter + return q +} + +// Add adds a score function that will execute on all the documents +// matching the filter. +func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, filter) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// AddScoreFunc adds a score function that will execute the function on all documents. +func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, nil) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// ScoreMode defines how results of individual score functions will be aggregated. +// Can be first, avg, max, sum, min, or multiply. +func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery { + q.scoreMode = scoreMode + return q +} + +// BoostMode defines how the combined result of score functions will +// influence the final score together with the sub query score. +func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery { + q.boostMode = boostMode + return q +} + +// MaxBoost is the maximum boost that will be applied by function score. +func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery { + q.maxBoost = &maxBoost + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by the +// boost provided. +func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery { + q.boost = &boost + return q +} + +// MinScore sets the minimum score. +func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery { + q.minScore = &minScore + return q +} + +// Source returns JSON for the function score query. +func (q *FunctionScoreQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["function_score"] = query + + if q.query != nil { + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + } else if q.filter != nil { + src, err := q.filter.Source() + if err != nil { + return nil, err + } + query["filter"] = src + } + + if len(q.filters) == 1 && q.filters[0] == nil { + // Weight needs to be serialized on this level. + if weight := q.scoreFuncs[0].GetWeight(); weight != nil { + query["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[0].Source() + if err != nil { + return nil, err + } + query[q.scoreFuncs[0].Name()] = src + } else { + funcs := make([]interface{}, len(q.filters)) + for i, filter := range q.filters { + hsh := make(map[string]interface{}) + if filter != nil { + src, err := filter.Source() + if err != nil { + return nil, err + } + hsh["filter"] = src + } + // Weight needs to be serialized on this level. 
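+ // Here "this level" is the per-function entry, next to its "filter".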
+ if weight := q.scoreFuncs[i].GetWeight(); weight != nil { + hsh["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[i].Source() + if err != nil { + return nil, err + } + hsh[q.scoreFuncs[i].Name()] = src + funcs[i] = hsh + } + query["functions"] = funcs + } + + if q.scoreMode != "" { + query["score_mode"] = q.scoreMode + } + if q.boostMode != "" { + query["boost_mode"] = q.boostMode + } + if q.maxBoost != nil { + query["max_boost"] = *q.maxBoost + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.minScore != nil { + query["min_score"] = *q.minScore + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go new file mode 100644 index 000000000..fbce3577d --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq_score_funcs.go @@ -0,0 +1,567 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "strings" +) + +// ScoreFunction is used in combination with the Function Score Query. +type ScoreFunction interface { + Name() string + GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery + Source() (interface{}, error) +} + +// -- Exponential Decay -- + +// ExponentialDecayFunction builds an exponential decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type ExponentialDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewExponentialDecayFunction creates a new ExponentialDecayFunction. +func NewExponentialDecayFunction() *ExponentialDecayFunction { + return &ExponentialDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ExponentialDecayFunction) Name() string { + return "exp" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *ExponentialDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *ExponentialDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + return source, nil +} + +// -- Gauss Decay -- + +// GaussDecayFunction builds a gauss decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type GaussDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewGaussDecayFunction returns a new GaussDecayFunction. +func NewGaussDecayFunction() *GaussDecayFunction { + return &GaussDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *GaussDecayFunction) Name() string { + return "gauss" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. 
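+//
+// A hedged sketch of a weighted gauss decay function (the field name and
+// values are illustrative):
+//
+//   fn := NewGaussDecayFunction().FieldName("pin.location").
+//     Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33).Weight(2)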
+func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *GaussDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *GaussDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Linear Decay -- + +// LinearDecayFunction builds a linear decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html +// for details. +type LinearDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewLinearDecayFunction initializes and returns a new LinearDecayFunction. +func NewLinearDecayFunction() *LinearDecayFunction { + return &LinearDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *LinearDecayFunction) Name() string { + return "linear" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. 
+// Returns nil if weight is not specified. +func (fn *LinearDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction { + fn.multiValueMode = mode + return fn +} + +// GetMultiValueMode returns how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) GetMultiValueMode() string { + return fn.multiValueMode +} + +// Source returns the serializable JSON data of this score function. +func (fn *LinearDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Script -- + +// ScriptFunction builds a script score function. It uses a script to +// compute or influence the score of documents that match with the inner +// query or filter. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_script_score +// for details. +type ScriptFunction struct { + script *Script + weight *float64 +} + +// NewScriptFunction initializes and returns a new ScriptFunction. +func NewScriptFunction(script *Script) *ScriptFunction { + return &ScriptFunction{ + script: script, + } +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ScriptFunction) Name() string { + return "script_score" +} + +// Script specifies the script to be executed. +func (fn *ScriptFunction) Script(script *Script) *ScriptFunction { + fn.script = script + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *ScriptFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *ScriptFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.script != nil { + src, err := fn.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Field value factor -- + +// FieldValueFactorFunction is a function score function that allows you +// to use a field from a document to influence the score. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_field_value_factor. 
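+//
+// A short sketch (the "income" field is illustrative):
+//
+//   fn := NewFieldValueFactorFunction().Field("income").Factor(2).Modifier("sqrt")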
+type FieldValueFactorFunction struct { + field string + factor *float64 + missing *float64 + weight *float64 + modifier string +} + +// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction. +func NewFieldValueFactorFunction() *FieldValueFactorFunction { + return &FieldValueFactorFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *FieldValueFactorFunction) Name() string { + return "field_value_factor" +} + +// Field is the field to be extracted from the document. +func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction { + fn.field = field + return fn +} + +// Factor is the (optional) factor to multiply the field with. If you do not +// specify a factor, the default is 1. +func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction { + fn.factor = &factor + return fn +} + +// Modifier to apply to the field value. It can be one of: none, log, log1p, +// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none. +func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction { + fn.modifier = modifier + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *FieldValueFactorFunction) GetWeight() *float64 { + return fn.weight +} + +// Missing is used if a document does not have that field. +func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction { + fn.missing = &missing + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *FieldValueFactorFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.field != "" { + source["field"] = fn.field + } + if fn.factor != nil { + source["factor"] = *fn.factor + } + if fn.missing != nil { + source["missing"] = *fn.missing + } + if fn.modifier != "" { + source["modifier"] = strings.ToLower(fn.modifier) + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Weight Factor -- + +// WeightFactorFunction builds a weight factor function that multiplies +// the weight to the score. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_weight +// for details. +type WeightFactorFunction struct { + weight float64 +} + +// NewWeightFactorFunction initializes and returns a new WeightFactorFunction. +func NewWeightFactorFunction(weight float64) *WeightFactorFunction { + return &WeightFactorFunction{weight: weight} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *WeightFactorFunction) Name() string { + return "weight" +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. 
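+//
+// For example, NewWeightFactorFunction(3) contributes a constant factor
+// of 3 to the score of every matching document.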
+func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction { + fn.weight = weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *WeightFactorFunction) GetWeight() *float64 { + return &fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *WeightFactorFunction) Source() (interface{}, error) { + // Notice that the weight has to be serialized in FunctionScoreQuery. + return fn.weight, nil +} + +// -- Random -- + +// RandomFunction builds a random score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_random +// for details. +type RandomFunction struct { + seed interface{} + weight *float64 +} + +// NewRandomFunction initializes and returns a new RandomFunction. +func NewRandomFunction() *RandomFunction { + return &RandomFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *RandomFunction) Name() string { + return "random_score" +} + +// Seed is documented in 1.6 as a numeric value. However, in the source code +// of the Java client, it also accepts strings. So we accept both here, too. +func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction { + fn.seed = seed + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *RandomFunction) Weight(weight float64) *RandomFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *RandomFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *RandomFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.seed != nil { + source["seed"] = fn.seed + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq_test.go new file mode 100644 index 000000000..59f1cd191 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fsq_test.go @@ -0,0 +1,166 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFunctionScoreQuery(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + Add(NewTermQuery("name.last", "banon"), NewWeightFactorFunction(1.5)). + AddScoreFunc(NewWeightFactorFunction(3)). + AddScoreFunc(NewRandomFunction()). + Boost(3). + MaxBoost(10). 
+ ScoreMode("avg") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":3,"functions":[{"filter":{"term":{"name.last":"banon"}},"weight":1.5},{"weight":3},{"random_score":{}}],"max_boost":10,"query":{"term":{"name.last":"banon"}},"score_mode":"avg"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithNilFilter(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("tag", "wow")). + AddScoreFunc(NewRandomFunction()). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","max_boost":12,"query":{"term":{"tag":"wow"}},"random_score":{},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactor(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income")). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactorWithWeight(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). + ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max","weight":2.5}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFieldValueFactorWithMultipleScoreFuncsAndWeights(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)). + AddScoreFunc(NewScriptFunction(NewScript("_score * doc['my_numeric_field'].value")).Weight(1.25)). + AddScoreFunc(NewWeightFactorFunction(0.5)). + Boost(2.0). + MaxBoost(12.0). + BoostMode("multiply"). 
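+ // Editor's note: boost_mode controls how the combined function score is
+ // merged with the query score; the Elasticsearch docs list "multiply",
+ // "replace", "sum", "avg", "max" and "min".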
+ ScoreMode("max") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":"_score * doc['my_numeric_field'].value"},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithGaussScoreFunc(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"gauss":{"pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFunctionScoreQueryWithGaussScoreFuncAndMultiValueMode(t *testing.T) { + q := NewFunctionScoreQuery(). + Query(NewTermQuery("name.last", "banon")). + AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33).MultiValueMode("avg")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"function_score":{"gauss":{"multi_value_mode":"avg","pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go new file mode 100644 index 000000000..da79dc7e6 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fuzzy.go @@ -0,0 +1,120 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FuzzyQuery uses similarity based on Levenshtein edit distance for +// string fields, and a +/- margin on numeric and date fields. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html +type FuzzyQuery struct { + name string + value interface{} + boost *float64 + fuzziness interface{} + prefixLength *int + maxExpansions *int + transpositions *bool + rewrite string + queryName string +} + +// NewFuzzyQuery creates a new fuzzy query. +func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery { + q := &FuzzyQuery{ + name: name, + value: value, + } + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. 
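+//
+// For illustration (editor's sketch, mirroring the test file below):
+//
+//   q := NewFuzzyQuery("user", "ki").Boost(1.5).Fuzziness(2)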
+func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery {
+ q.boost = &boost
+ return q
+}
+
+// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings
+// like "auto", "0..1", "1..4" or "0.0..1.0".
+func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery {
+ q.transpositions = &transpositions
+ return q
+}
+
+func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the fuzzy query.
+func (q *FuzzyQuery) Source() (interface{}, error) {
+ // {
+ // "fuzzy" : {
+ // "user" : {
+ // "value" : "ki",
+ // "boost" : 1.0,
+ // "fuzziness" : 2,
+ // "prefix_length" : 0,
+ // "max_expansions" : 100
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["fuzzy"] = query
+
+ fq := make(map[string]interface{})
+ query[q.name] = fq
+
+ fq["value"] = q.value
+
+ if q.boost != nil {
+ fq["boost"] = *q.boost
+ }
+ if q.transpositions != nil {
+ fq["transpositions"] = *q.transpositions
+ }
+ if q.fuzziness != nil {
+ fq["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ fq["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ fq["max_expansions"] = *q.maxExpansions
+ }
+ if q.rewrite != "" {
+ fq["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ fq["_name"] = q.queryName
+ }
+
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fuzzy_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fuzzy_test.go
new file mode 100644
index 000000000..fbbfe2f94
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_fuzzy_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFuzzyQuery(t *testing.T) {
+ q := NewFuzzyQuery("user", "ki").Boost(1.5).Fuzziness(2).PrefixLength(0).MaxExpansions(100)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fuzzy":{"user":{"boost":1.5,"fuzziness":2,"max_expansions":100,"prefix_length":0,"value":"ki"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go
new file mode 100644
index 000000000..808ce82df
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box.go
@@ -0,0 +1,121 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// GeoBoundingBoxQuery allows filtering hits based on a point location using
+// a bounding box.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-bounding-box-query.html
+type GeoBoundingBoxQuery struct {
+ name string
+ top *float64
+ left *float64
+ bottom *float64
+ right *float64
+ typ string
+ queryName string
+}
+
+// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery.
+func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery {
+ return &GeoBoundingBoxQuery{
+ name: name,
+ }
+}
+
+func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery {
+ q.top = &top
+ q.left = &left
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
+ return q.TopLeft(point.Lat, point.Lon)
+}
+
+func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery {
+ q.bottom = &bottom
+ q.right = &right
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
+ return q.BottomRight(point.Lat, point.Lon)
+}
+
+func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery {
+ q.bottom = &bottom
+ q.left = &left
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
+ return q.BottomLeft(point.Lat, point.Lon)
+}
+
+func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery {
+ q.top = &top
+ q.right = &right
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
+ return q.TopRight(point.Lat, point.Lon)
+}
+
+// Type sets how the geo bounding box is executed. It can be either
+// memory or indexed. It defaults to memory.
+func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery {
+ q.typ = typ
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the geo_bounding_box query.
+func (q *GeoBoundingBoxQuery) Source() (interface{}, error) {
+ // {
+ // "geo_bbox" : {
+ // ...
+ // } + // } + + if q.top == nil { + return nil, errors.New("geo_bounding_box requires top latitude to be set") + } + if q.bottom == nil { + return nil, errors.New("geo_bounding_box requires bottom latitude to be set") + } + if q.right == nil { + return nil, errors.New("geo_bounding_box requires right longitude to be set") + } + if q.left == nil { + return nil, errors.New("geo_bounding_box requires left longitude to be set") + } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["geo_bbox"] = params + + box := make(map[string]interface{}) + box["top_left"] = []float64{*q.left, *q.top} + box["bottom_right"] = []float64{*q.right, *q.bottom} + params[q.name] = box + + if q.typ != "" { + params["type"] = q.typ + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box_test.go new file mode 100644 index 000000000..6b15885ca --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_bounding_box_test.go @@ -0,0 +1,63 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoBoundingBoxQueryIncomplete(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeft(40.73, -74.1) + // no bottom and no right here + q = q.Type("memory") + src, err := q.Source() + if err == nil { + t.Fatal("expected error") + } + if src != nil { + t.Fatal("expected empty source") + } +} + +func TestGeoBoundingBoxQuery(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeft(40.73, -74.1) + q = q.BottomRight(40.01, -71.12) + q = q.Type("memory") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]},"type":"memory"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoBoundingBoxQueryWithGeoPoint(t *testing.T) { + q := NewGeoBoundingBoxQuery("pin.location") + q = q.TopLeftFromGeoPoint(GeoPointFromLatLon(40.73, -74.1)) + q = q.BottomRightFromGeoPoint(GeoPointFromLatLon(40.01, -71.12)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go new file mode 100644 index 000000000..c1eed8521 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_distance.go @@ -0,0 +1,116 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+// GeoDistanceQuery filters documents to include only hits that exist
+// within a specific distance from a geo point.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-query.html
+type GeoDistanceQuery struct {
+ name string
+ distance string
+ lat float64
+ lon float64
+ geohash string
+ distanceType string
+ optimizeBbox string
+ queryName string
+}
+
+// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery.
+func NewGeoDistanceQuery(name string) *GeoDistanceQuery {
+ return &GeoDistanceQuery{name: name}
+}
+
+func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery {
+ q.lat = point.Lat
+ q.lon = point.Lon
+ return q
+}
+
+func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery {
+ q.lat = lat
+ q.lon = lon
+ return q
+}
+
+func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery {
+ q.lat = lat
+ return q
+}
+
+func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery {
+ q.lon = lon
+ return q
+}
+
+func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery {
+ q.geohash = geohash
+ return q
+}
+
+func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery {
+ q.distance = distance
+ return q
+}
+
+func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery {
+ q.distanceType = distanceType
+ return q
+}
+
+func (q *GeoDistanceQuery) OptimizeBbox(optimizeBbox string) *GeoDistanceQuery {
+ q.optimizeBbox = optimizeBbox
+ return q
+}
+
+func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the geo_distance query.
+func (q *GeoDistanceQuery) Source() (interface{}, error) {
+ // {
+ // "geo_distance" : {
+ // "distance" : "200km",
+ // "pin.location" : {
+ // "lat" : 40,
+ // "lon" : -70
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+
+ if q.geohash != "" {
+ params[q.name] = q.geohash
+ } else {
+ location := make(map[string]interface{})
+ location["lat"] = q.lat
+ location["lon"] = q.lon
+ params[q.name] = location
+ }
+
+ if q.distance != "" {
+ params["distance"] = q.distance
+ }
+ if q.distanceType != "" {
+ params["distance_type"] = q.distanceType
+ }
+ if q.optimizeBbox != "" {
+ params["optimize_bbox"] = q.optimizeBbox
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ source["geo_distance"] = params
+
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_distance_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_distance_test.go
new file mode 100644
index 000000000..f0b8ca654
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_distance_test.go
@@ -0,0 +1,70 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoDistanceQuery(t *testing.T) {
+ q := NewGeoDistanceQuery("pin.location")
+ q = q.Lat(40)
+ q = q.Lon(-70)
+ q = q.Distance("200km")
+ q = q.DistanceType("plane")
+ q = q.OptimizeBbox("memory")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","optimize_bbox":"memory","pin.location":{"lat":40,"lon":-70}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceQueryWithGeoPoint(t *testing.T) {
+ q := NewGeoDistanceQuery("pin.location")
+ q = q.GeoPoint(GeoPointFromLatLon(40, -70))
+ q = q.Distance("200km")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceQueryWithGeoHash(t *testing.T) {
+ q := NewGeoDistanceQuery("pin.location")
+ q = q.GeoHash("drm3btev3e86")
+ q = q.Distance("12km")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"12km","pin.location":"drm3btev3e86"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go
new file mode 100644
index 000000000..b08d7078a
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon.go
@@ -0,0 +1,72 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoPolygonQuery allows including only those hits that fall within a polygon of points.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-polygon-query.html
+type GeoPolygonQuery struct {
+ name string
+ points []*GeoPoint
+ queryName string
+}
+
+// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery.
+func NewGeoPolygonQuery(name string) *GeoPolygonQuery {
+ return &GeoPolygonQuery{
+ name: name,
+ points: make([]*GeoPoint, 0),
+ }
+}
+
+// AddPoint adds a point from latitude and longitude.
+func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery {
+ q.points = append(q.points, GeoPointFromLatLon(lat, lon))
+ return q
+}
+
+// AddGeoPoint adds a GeoPoint.
+func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery {
+ q.points = append(q.points, point)
+ return q
+}
+
+func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the geo_polygon query.
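+//
+// A usage sketch (editor's illustration, grounded in the tests below): the
+// polygon is defined by at least three corner points, e.g.
+//
+//   q := NewGeoPolygonQuery("person.location").
+//       AddPoint(40, -70).
+//       AddPoint(30, -80).
+//       AddPoint(20, -90)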
+func (q *GeoPolygonQuery) Source() (interface{}, error) { + // "geo_polygon" : { + // "person.location" : { + // "points" : [ + // {"lat" : 40, "lon" : -70}, + // {"lat" : 30, "lon" : -80}, + // {"lat" : 20, "lon" : -90} + // ] + // } + // } + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["geo_polygon"] = params + + polygon := make(map[string]interface{}) + params[q.name] = polygon + + points := make([]interface{}, 0) + for _, point := range q.points { + points = append(points, point.Source()) + } + polygon["points"] = points + + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon_test.go new file mode 100644 index 000000000..efe89a8d4 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_geo_polygon_test.go @@ -0,0 +1,58 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestGeoPolygonQuery(t *testing.T) { + q := NewGeoPolygonQuery("person.location") + q = q.AddPoint(40, -70) + q = q.AddPoint(30, -80) + point, err := GeoPointFromString("20,-90") + if err != nil { + t.Fatalf("GeoPointFromString failed: %v", err) + } + q = q.AddGeoPoint(point) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestGeoPolygonQueryFromGeoPoints(t *testing.T) { + q := NewGeoPolygonQuery("person.location") + q = q.AddGeoPoint(&GeoPoint{Lat: 40, Lon: -70}) + q = q.AddGeoPoint(GeoPointFromLatLon(30, -80)) + point, err := GeoPointFromString("20,-90") + if err != nil { + t.Fatalf("GeoPointFromString failed: %v", err) + } + q = q.AddGeoPoint(point) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_child.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_child.go new file mode 100644 index 000000000..a8907546b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_child.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasChildQuery accepts a query and the child type to run against, and results +// in parent documents that have child docs matching the query. 
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html
+type HasChildQuery struct {
+ query Query
+ childType string
+ boost *float64
+ scoreType string
+ minChildren *int
+ maxChildren *int
+ shortCircuitCutoff *int
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewHasChildQuery creates and initializes a new has_child query.
+func NewHasChildQuery(childType string, query Query) *HasChildQuery {
+ return &HasChildQuery{
+ query: query,
+ childType: childType,
+ }
+}
+
+// Boost sets the boost for this query.
+func (q *HasChildQuery) Boost(boost float64) *HasChildQuery {
+ q.boost = &boost
+ return q
+}
+
+// ScoreType defines how the scores from the matching child documents
+// are mapped into the parent document.
+func (q *HasChildQuery) ScoreType(scoreType string) *HasChildQuery {
+ q.scoreType = scoreType
+ return q
+}
+
+// MinChildren defines the minimum number of children that are required
+// to match for the parent to be considered a match.
+func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery {
+ q.minChildren = &minChildren
+ return q
+}
+
+// MaxChildren defines the maximum number of matching children allowed
+// for the parent to be considered a match.
+func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery {
+ q.maxChildren = &maxChildren
+ return q
+}
+
+// ShortCircuitCutoff configures the cut off point at which only parent
+// documents that contain the matching parent id terms are evaluated,
+// instead of evaluating all parent docs.
+func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery {
+ q.shortCircuitCutoff = &shortCircuitCutoff
+ return q
+}
+
+// QueryName specifies the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery {
+ q.queryName = queryName
+ return q
+}
+
+// InnerHit sets the inner hit definition in the scope of this query and
+// reusing the defined type and query.
+func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Source returns JSON for the has_child query.
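+//
+// For illustration (editor's sketch, mirroring the tests below):
+//
+//   q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")).
+//       InnerHit(NewInnerHit().Name("comments"))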
+func (q *HasChildQuery) Source() (interface{}, error) {
+ // {
+ // "has_child" : {
+ // "type" : "blog_tag",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["has_child"] = query
+
+ src, err := q.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["query"] = src
+ query["type"] = q.childType
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.scoreType != "" {
+ query["score_type"] = q.scoreType
+ }
+ if q.minChildren != nil {
+ query["min_children"] = *q.minChildren
+ }
+ if q.maxChildren != nil {
+ query["max_children"] = *q.maxChildren
+ }
+ if q.shortCircuitCutoff != nil {
+ query["short_circuit_cutoff"] = *q.shortCircuitCutoff
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ src, err := q.innerHit.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["inner_hits"] = src
+ }
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_child_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_child_test.go
new file mode 100644
index 000000000..887b2e263
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_child_test.go
@@ -0,0 +1,45 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestHasChildQuery(t *testing.T) {
+ q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_child":{"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHasChildQueryWithInnerHit(t *testing.T) {
+ q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something"))
+ q = q.InnerHit(NewInnerHit().Name("comments"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_child":{"inner_hits":{"name":"comments"},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go
new file mode 100644
index 000000000..4db1dde7e
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_parent.go
@@ -0,0 +1,97 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HasParentQuery accepts a query and a parent type. The query is executed
+// in the parent document space which is specified by the parent type.
+// This query returns child documents whose associated parents have matched.
+// Otherwise, the has_parent query has the same options and works in the
+// same manner as the has_child query.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-parent-query.html
+type HasParentQuery struct {
+ query Query
+ parentType string
+ boost *float64
+ scoreType string
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewHasParentQuery creates and initializes a new has_parent query.
+func NewHasParentQuery(parentType string, query Query) *HasParentQuery {
+ return &HasParentQuery{
+ query: query,
+ parentType: parentType,
+ }
+}
+
+// Boost sets the boost for this query.
+func (q *HasParentQuery) Boost(boost float64) *HasParentQuery {
+ q.boost = &boost
+ return q
+}
+
+// ScoreType defines how the parent score is mapped into the child documents.
+func (q *HasParentQuery) ScoreType(scoreType string) *HasParentQuery {
+ q.scoreType = scoreType
+ return q
+}
+
+// QueryName specifies the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery {
+ q.queryName = queryName
+ return q
+}
+
+// InnerHit sets the inner hit definition in the scope of this query and
+// reusing the defined type and query.
+func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Source returns JSON for the has_parent query.
+func (q *HasParentQuery) Source() (interface{}, error) {
+ // {
+ // "has_parent" : {
+ // "parent_type" : "blog",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["has_parent"] = query
+
+ src, err := q.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["query"] = src
+ query["parent_type"] = q.parentType
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.scoreType != "" {
+ query["score_type"] = q.scoreType
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ src, err := q.innerHit.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["inner_hits"] = src
+ }
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_parent_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_parent_test.go
new file mode 100644
index 000000000..b5daefda8
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_has_parent_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestHasParentQueryTest(t *testing.T) {
+ q := NewHasParentQuery("blog", NewTermQuery("tag", "something"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_ids.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_ids.go
new file mode 100644
index 000000000..96f463dc6
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_ids.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// IdsQuery filters documents to only those that have the provided ids.
+// Note, this query uses the _uid field.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-ids-query.html
+type IdsQuery struct {
+ types []string
+ values []string
+ boost *float64
+ queryName string
+}
+
+// NewIdsQuery creates and initializes a new ids query.
+func NewIdsQuery(types ...string) *IdsQuery {
+ return &IdsQuery{
+ types: types,
+ values: make([]string, 0),
+ }
+}
+
+// Ids adds ids to the filter.
+func (q *IdsQuery) Ids(ids ...string) *IdsQuery {
+ q.values = append(q.values, ids...)
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *IdsQuery) Boost(boost float64) *IdsQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter.
+func (q *IdsQuery) QueryName(queryName string) *IdsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the ids query.
+func (q *IdsQuery) Source() (interface{}, error) {
+ // {
+ // "ids" : {
+ // "type" : "my_type",
+ // "values" : ["1", "4", "100"]
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["ids"] = query
+
+ // type(s)
+ if len(q.types) == 1 {
+ query["type"] = q.types[0]
+ } else if len(q.types) > 1 {
+ query["types"] = q.types
+ }
+
+ // values
+ query["values"] = q.values
+
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_ids_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_ids_test.go
new file mode 100644
index 000000000..d1ff9a6b1
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_ids_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIdsQuery(t *testing.T) {
+ q := NewIdsQuery("my_type").Ids("1", "4", "100").Boost(10.5).QueryName("my_query")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ids":{"_name":"my_query","boost":10.5,"type":"my_type","values":["1","4","100"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_indices.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_indices.go
new file mode 100644
index 000000000..56efab3dd
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_indices.go
@@ -0,0 +1,89 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// IndicesQuery can be used when searching across multiple indices, allowing
+// you to specify one query that executes only on indices matching
+// a specific list of indices, and another query that executes when the
+// search runs on an index that does not match the listed indices.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-indices-query.html
+type IndicesQuery struct {
+ query Query
+ indices []string
+ noMatchQueryType string
+ noMatchQuery Query
+ queryName string
+}
+
+// NewIndicesQuery creates and initializes a new indices query.
+func NewIndicesQuery(query Query, indices ...string) *IndicesQuery {
+ return &IndicesQuery{
+ query: query,
+ indices: indices,
+ }
+}
+
+// NoMatchQuery sets the query to use when it executes on an index that
+// does not match the indices provided.
+func (q *IndicesQuery) NoMatchQuery(query Query) *IndicesQuery {
+ q.noMatchQuery = query
+ return q
+}
+
+// NoMatchQueryType sets the no match query which can be either all or none.
+func (q *IndicesQuery) NoMatchQueryType(typ string) *IndicesQuery {
+ q.noMatchQueryType = typ
+ return q
+}
+
+// QueryName sets the query name for the filter.
+func (q *IndicesQuery) QueryName(queryName string) *IndicesQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the indices query.
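+//
+// Illustrative sketch (editor's note, matching the tests below): one query
+// runs against the listed indices while a fallback runs everywhere else:
+//
+//   q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2").
+//       NoMatchQuery(NewTermQuery("tag", "kow"))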
+func (q *IndicesQuery) Source() (interface{}, error) {
+ // {
+ // "indices" : {
+ // "indices" : ["index1", "index2"],
+ // "query" : {
+ // "term" : { "tag" : "wow" }
+ // },
+ // "no_match_query" : {
+ // "term" : { "tag" : "kow" }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["indices"] = params
+
+ params["indices"] = q.indices
+
+ src, err := q.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ params["query"] = src
+
+ if q.noMatchQuery != nil {
+ src, err := q.noMatchQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ params["no_match_query"] = src
+ } else if q.noMatchQueryType != "" {
+ params["no_match_query"] = q.noMatchQueryType
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_indices_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_indices_test.go
new file mode 100644
index 000000000..f011b9ac7
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_indices_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIndicesQuery(t *testing.T) {
+ q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2")
+ q = q.NoMatchQuery(NewTermQuery("tag", "kow"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"indices":{"indices":["index1","index2"],"no_match_query":{"term":{"tag":"kow"}},"query":{"term":{"tag":"wow"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIndicesQueryWithNoMatchQueryType(t *testing.T) {
+ q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2")
+ q = q.NoMatchQueryType("all")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"indices":{"indices":["index1","index2"],"no_match_query":"all","query":{"term":{"tag":"wow"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match.go
new file mode 100644
index 000000000..b740b0f0d
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match.go
@@ -0,0 +1,214 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchQuery is a family of queries that accepts text/numerics/dates,
+// analyzes them, and constructs a query.
+//
+// To create a new MatchQuery, use NewMatchQuery. To create specific types
+// of queries, e.g. a match_phrase query, use NewMatchQuery(...).Type("phrase"),
+// or use one of the shortcuts e.g. NewMatchPhraseQuery(...).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html
+type MatchQuery struct {
+ name string
+ text interface{}
+ typ string // boolean, phrase, phrase_prefix
+ operator string // or / and
+ analyzer string
+ boost *float64
+ slop *int
+ fuzziness string
+ prefixLength *int
+ maxExpansions *int
+ minimumShouldMatch string
+ rewrite string
+ fuzzyRewrite string
+ lenient *bool
+ fuzzyTranspositions *bool
+ zeroTermsQuery string
+ cutoffFrequency *float64
+ queryName string
+}
+
+// NewMatchQuery creates and initializes a new MatchQuery.
+func NewMatchQuery(name string, text interface{}) *MatchQuery {
+ return &MatchQuery{name: name, text: text}
+}
+
+// NewMatchPhraseQuery creates and initializes a new MatchQuery of type phrase.
+func NewMatchPhraseQuery(name string, text interface{}) *MatchQuery {
+ return &MatchQuery{name: name, text: text, typ: "phrase"}
+}
+
+// NewMatchPhrasePrefixQuery creates and initializes a new MatchQuery of type phrase_prefix.
+func NewMatchPhrasePrefixQuery(name string, text interface{}) *MatchQuery {
+ return &MatchQuery{name: name, text: text, typ: "phrase_prefix"}
+}
+
+// Type can be "boolean", "phrase", or "phrase_prefix". Defaults to "boolean".
+func (q *MatchQuery) Type(typ string) *MatchQuery {
+ q.typ = typ
+ return q
+}
+
+// Operator sets the operator to use when using a boolean query.
+// Can be "AND" or "OR" (default).
+func (q *MatchQuery) Operator(operator string) *MatchQuery {
+ q.operator = operator
+ return q
+}
+
+// Analyzer explicitly sets the analyzer to use. It defaults to the explicit
+// mapping config for the field, or, if not set, the default search analyzer.
+func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Boost sets the boost to apply to this query.
+func (q *MatchQuery) Boost(boost float64) *MatchQuery {
+ q.boost = &boost
+ return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MatchQuery) Slop(slop int) *MatchQuery {
+ q.slop = &slop
+ return q
+}
+
+// Fuzziness sets the fuzziness when evaluated to a fuzzy query type.
+// Defaults to "AUTO".
+func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+// MaxExpansions is used with fuzzy or prefix type queries. It specifies
+// the number of term expansions to use. It defaults to unbounded, so it is
+// recommended to set it to a reasonable value for faster execution.
+func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+// CutoffFrequency can be a value in [0..1] (or an absolute number >=1).
+// It represents the maximum threshold of a term's document frequency to be
+// considered a low frequency term.
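+//
+// For example (editor's note): CutoffFrequency(0.001) treats terms that
+// occur in fewer than 0.1% of all documents as low-frequency terms.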
+func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery {
+ q.cutoffFrequency = &cutoff
+ return q
+}
+
+func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q *MatchQuery) Rewrite(rewrite string) *MatchQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery {
+ q.fuzzyTranspositions = &fuzzyTranspositions
+ return q
+}
+
+// Lenient specifies whether format based failures will be ignored.
+func (q *MatchQuery) Lenient(lenient bool) *MatchQuery {
+ q.lenient = &lenient
+ return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery {
+ q.zeroTermsQuery = zeroTermsQuery
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *MatchQuery) QueryName(queryName string) *MatchQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the match query.
+func (q *MatchQuery) Source() (interface{}, error) {
+ // {"match":{"name":{"query":"value","type":"boolean/phrase"}}}
+ source := make(map[string]interface{})
+
+ match := make(map[string]interface{})
+ source["match"] = match
+
+ query := make(map[string]interface{})
+ match[q.name] = query
+
+ query["query"] = q.text
+
+ if q.typ != "" {
+ query["type"] = q.typ
+ }
+ if q.operator != "" {
+ query["operator"] = q.operator
+ }
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.slop != nil {
+ query["slop"] = *q.slop
+ }
+ if q.fuzziness != "" {
+ query["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ query["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ query["max_expansions"] = *q.maxExpansions
+ }
+ if q.minimumShouldMatch != "" {
+ query["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.rewrite != "" {
+ query["rewrite"] = q.rewrite
+ }
+ if q.fuzzyRewrite != "" {
+ query["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+ if q.lenient != nil {
+ query["lenient"] = *q.lenient
+ }
+ if q.fuzzyTranspositions != nil {
+ query["fuzzy_transpositions"] = *q.fuzzyTranspositions
+ }
+ if q.zeroTermsQuery != "" {
+ query["zero_terms_query"] = q.zeroTermsQuery
+ }
+ if q.cutoffFrequency != nil {
+ query["cutoff_frequency"] = q.cutoffFrequency
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_all.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_all.go
new file mode 100644
index 000000000..5b5ca590e
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_all.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchAllQuery is the simplest query, which matches all documents,
+// giving them all a _score of 1.0.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-match-all-query.html
+type MatchAllQuery struct {
+ boost *float64
+}
+
+// NewMatchAllQuery creates and initializes a new match all query.
+func NewMatchAllQuery() *MatchAllQuery {
+ return &MatchAllQuery{}
+}
+
+// Boost sets the boost for this query. Documents matching this query will
+// (in addition to the normal weightings) have their score multiplied by the
+// boost provided.
+func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery {
+ q.boost = &boost
+ return q
+}
+
+// Source returns JSON for the match_all query.
+func (q MatchAllQuery) Source() (interface{}, error) {
+ // {
+ // "match_all" : { ... }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["match_all"] = params
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_all_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_all_test.go
new file mode 100644
index 000000000..0dcebb1f6
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_all_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchAllQuery(t *testing.T) {
+ q := NewMatchAllQuery()
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchAllQueryWithBoost(t *testing.T) {
+ q := NewMatchAllQuery().Boost(3.14)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{"boost":3.14}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_test.go
new file mode 100644
index 000000000..ade59351f
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_match_test.go
@@ -0,0 +1,78 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchQuery(t *testing.T) {
+ q := NewMatchQuery("message", "this is a test")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"query":"this is a test"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchPhraseQuery(t *testing.T) {
+ q := NewMatchPhraseQuery("message", "this is a test")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"query":"this is a test","type":"phrase"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchPhrasePrefixQuery(t *testing.T) {
+ q := NewMatchPhrasePrefixQuery("message", "this is a test")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"query":"this is a test","type":"phrase_prefix"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchQueryWithOptions(t *testing.T) {
+ q := NewMatchQuery("message", "this is a test").Analyzer("whitespace").Operator("or").Boost(2.5)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"analyzer":"whitespace","boost":2.5,"operator":"or","query":"this is a test"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_missing.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_missing.go
new file mode 100644
index 000000000..0fff3f55c
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_missing.go
@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MissingQuery returns documents that have only null values or no value
+// in the original field.
+//
+// For details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-missing-query.html
+type MissingQuery struct {
+ name string
+ queryName string
+ nullValue *bool
+ existence *bool
+}
+
+// NewMissingQuery creates and initializes a new MissingQuery.
+func NewMissingQuery(name string) *MissingQuery {
+ return &MissingQuery{name: name}
+}
+
+// QueryName sets the query name for the query that can be used when
+// searching for matched filters per hit.
+func (q *MissingQuery) QueryName(queryName string) *MissingQuery {
+ q.queryName = queryName
+ return q
+}
+
+// NullValue indicates whether the missing filter automatically includes
+// fields with null value configured in the mappings. Defaults to false.
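+//
+// A usage sketch (editor's illustration, matching the test below):
+//
+//   q := NewMissingQuery("user").NullValue(true).Existence(true)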
+func (q *MissingQuery) NullValue(nullValue bool) *MissingQuery { + q.nullValue = &nullValue + return q +} + +// Existence indicates whether the missing filter includes documents where +// the field doesn't exist in the docs. +func (q *MissingQuery) Existence(existence bool) *MissingQuery { + q.existence = &existence + return q +} + +// Source returns JSON for the query. +func (q *MissingQuery) Source() (interface{}, error) { + // { + // "missing" : { + // "field" : "..." + // } + // } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["missing"] = params + params["field"] = q.name + if q.nullValue != nil { + params["null_value"] = *q.nullValue + } + if q.existence != nil { + params["existence"] = *q.existence + } + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_missing_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_missing_test.go new file mode 100644 index 000000000..096b0b3cd --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_missing_test.go @@ -0,0 +1,44 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMissingQuery(t *testing.T) { + q := NewMissingQuery("user") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"missing":{"field":"user"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMissingQueryWithParams(t *testing.T) { + q := NewMissingQuery("user").NullValue(true).Existence(true).QueryName("_my_query") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"missing":{"_name":"_my_query","existence":true,"field":"user","null_value":true}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go new file mode 100644 index 000000000..afce3f05c --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_more_like_this.go @@ -0,0 +1,412 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// MoreLikeThis query (MLT Query) finds documents that are "like" a given +// set of documents. In order to do so, MLT selects a set of representative +// terms of these input documents, forms a query using these terms, executes +// the query and returns the results. The user controls the input documents, +// how the terms should be selected and how the query is formed. 
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html
+type MoreLikeThisQuery struct {
+ fields []string
+ docs []*MoreLikeThisQueryItem
+ unlikeDocs []*MoreLikeThisQueryItem
+ include *bool
+ minimumShouldMatch string
+ minTermFreq *int
+ maxQueryTerms *int
+ stopWords []string
+ minDocFreq *int
+ maxDocFreq *int
+ minWordLen *int
+ maxWordLen *int
+ boostTerms *float64
+ boost *float64
+ analyzer string
+ failOnUnsupportedField *bool
+ queryName string
+}
+
+// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery.
+func NewMoreLikeThisQuery() *MoreLikeThisQuery {
+ return &MoreLikeThisQuery{
+ fields: make([]string, 0),
+ stopWords: make([]string, 0),
+ docs: make([]*MoreLikeThisQueryItem, 0),
+ unlikeDocs: make([]*MoreLikeThisQueryItem, 0),
+ }
+}
+
+// Field adds one or more field names to the query.
+func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery {
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+// StopWord sets the stopwords. Any word in this set is considered
+// "uninteresting" and ignored. Even if your Analyzer allows stopwords,
+// you might want to tell the MoreLikeThis code to ignore them, as for
+// the purposes of document similarity it seems reasonable to assume that
+// "a stop word is never interesting".
+func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery {
+ q.stopWords = append(q.stopWords, stopWords...)
+ return q
+}
+
+// LikeText sets the text to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery {
+ for _, s := range likeTexts {
+ item := NewMoreLikeThisQueryItem().LikeText(s)
+ q.docs = append(q.docs, item)
+ }
+ return q
+}
+
+// LikeItems sets the documents to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
+ q.docs = append(q.docs, docs...)
+ return q
+}
+
+// IgnoreLikeText sets the text from which the terms should not be selected.
+func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery {
+ for _, s := range ignoreLikeText {
+ item := NewMoreLikeThisQueryItem().LikeText(s)
+ q.unlikeDocs = append(q.unlikeDocs, item)
+ }
+ return q
+}
+
+// IgnoreLikeItems sets the documents from which the terms should not be selected.
+func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
+ q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...)
+ return q
+}
+
+// Ids sets the document ids to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery {
+ for _, id := range ids {
+ item := NewMoreLikeThisQueryItem().Id(id)
+ q.docs = append(q.docs, item)
+ }
+ return q
+}
+
+// Include specifies whether the input documents should also be included
+// in the results returned. Defaults to false.
+func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery {
+ q.include = &include
+ return q
+}
+
+// MinimumShouldMatch sets the number of terms that must match the generated
+// query expressed in the common syntax for minimum should match.
+// The default value is "30%".
+//
+// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0.
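+//
+// A minimal sketch using the percentage form:
+//
+// q := NewMoreLikeThisQuery().LikeText("Golang topic").MinimumShouldMatch("30%")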
+func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+// MinTermFreq is the frequency below which terms will be ignored in the
+// source doc. The default frequency is 2.
+func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery {
+ q.minTermFreq = &minTermFreq
+ return q
+}
+
+// MaxQueryTerms sets the maximum number of query terms that will be included
+// in any generated query. It defaults to 25.
+func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+// MinDocFreq sets the minimum document frequency: words that do not occur
+// in at least this many docs will be ignored. The default is 5.
+func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery {
+ q.minDocFreq = &minDocFreq
+ return q
+}
+
+// MaxDocFreq sets the maximum frequency for which words may still appear.
+// Words that appear in more than this many docs will be ignored.
+// It defaults to unbounded.
+func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery {
+ q.maxDocFreq = &maxDocFreq
+ return q
+}
+
+// MinWordLen sets the minimum word length below which words will be
+// ignored. It defaults to 0.
+func (q *MoreLikeThisQuery) MinWordLen(minWordLen int) *MoreLikeThisQuery {
+ q.minWordLen = &minWordLen
+ return q
+}
+
+// MaxWordLen sets the maximum word length above which words will be ignored.
+// Defaults to unbounded (0).
+func (q *MoreLikeThisQuery) MaxWordLen(maxWordLen int) *MoreLikeThisQuery {
+ q.maxWordLen = &maxWordLen
+ return q
+}
+
+// BoostTerms sets the boost factor to use when boosting terms.
+// It defaults to 1.
+func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery {
+ q.boostTerms = &boostTerms
+ return q
+}
+
+// Analyzer specifies the analyzer that will be used to analyze the text.
+// Defaults to the analyzer associated with the field.
+func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery {
+ q.boost = &boost
+ return q
+}
+
+// FailOnUnsupportedField indicates whether to fail or return no result
+// when this query is run against a field which is not supported such as
+// a binary/numeric field.
+func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the source for the MLT query.
+// It may return an error if the caller forgot to specify any documents to
+// be "liked" in the MoreLikeThisQuery.
+func (q *MoreLikeThisQuery) Source() (interface{}, error) {
+ // {
+ // "mlt" : { ... 
} + // } + if len(q.docs) == 0 { + return nil, errors.New(`more_like_this requires some documents to be "liked"`) + } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["mlt"] = params + + if len(q.fields) > 0 { + params["fields"] = q.fields + } + + likes := make([]interface{}, 0) + for _, doc := range q.docs { + src, err := doc.Source() + if err != nil { + return nil, err + } + likes = append(likes, src) + } + params["like"] = likes + + if len(q.unlikeDocs) > 0 { + dontLikes := make([]interface{}, 0) + for _, doc := range q.unlikeDocs { + src, err := doc.Source() + if err != nil { + return nil, err + } + dontLikes = append(dontLikes, src) + } + params["unlike"] = dontLikes + } + + if q.minimumShouldMatch != "" { + params["minimum_should_match"] = q.minimumShouldMatch + } + if q.minTermFreq != nil { + params["min_term_freq"] = *q.minTermFreq + } + if q.maxQueryTerms != nil { + params["max_query_terms"] = *q.maxQueryTerms + } + if len(q.stopWords) > 0 { + params["stop_words"] = q.stopWords + } + if q.minDocFreq != nil { + params["min_doc_freq"] = *q.minDocFreq + } + if q.maxDocFreq != nil { + params["max_doc_freq"] = *q.maxDocFreq + } + if q.minWordLen != nil { + params["min_word_len"] = *q.minWordLen + } + if q.maxWordLen != nil { + params["max_word_len"] = *q.maxWordLen + } + if q.boostTerms != nil { + params["boost_terms"] = *q.boostTerms + } + if q.boost != nil { + params["boost"] = *q.boost + } + if q.analyzer != "" { + params["analyzer"] = q.analyzer + } + if q.failOnUnsupportedField != nil { + params["fail_on_unsupported_field"] = *q.failOnUnsupportedField + } + if q.queryName != "" { + params["_name"] = q.queryName + } + if q.include != nil { + params["include"] = *q.include + } + + return source, nil +} + +// -- MoreLikeThisQueryItem -- + +// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery +// to be "liked" or "unliked". +type MoreLikeThisQueryItem struct { + likeText string + + index string + typ string + id string + doc interface{} + fields []string + routing string + fsc *FetchSourceContext + version int64 + versionType string +} + +// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem. +func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem { + return &MoreLikeThisQueryItem{ + version: -1, + } +} + +// LikeText represents a text to be "liked". +func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem { + item.likeText = likeText + return item +} + +// Index represents the index of the item. +func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem { + item.index = index + return item +} + +// Type represents the document type of the item. +func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem { + item.typ = typ + return item +} + +// Id represents the document id of the item. +func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem { + item.id = id + return item +} + +// Doc represents a raw document template for the item. +func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem { + item.doc = doc + return item +} + +// Fields represents the list of fields of the item. +func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem { + item.fields = append(item.fields, fields...) + return item +} + +// Routing sets the routing associated with the item. 
+func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem { + item.routing = routing + return item +} + +// FetchSourceContext represents the fetch source of the item which controls +// if and how _source should be returned. +func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem { + item.fsc = fsc + return item +} + +// Version specifies the version of the item. +func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem { + item.version = version + return item +} + +// VersionType represents the version type of the item. +func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem { + item.versionType = versionType + return item +} + +// Source returns the JSON-serializable fragment of the entity. +func (item *MoreLikeThisQueryItem) Source() (interface{}, error) { + if item.likeText != "" { + return item.likeText, nil + } + + source := make(map[string]interface{}) + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.id != "" { + source["_id"] = item.id + } + if item.doc != nil { + source["doc"] = item.doc + } + if len(item.fields) > 0 { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.version >= 0 { + source["_version"] = item.version + } + if item.versionType != "" { + source["_version_type"] = item.versionType + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_more_like_this_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_more_like_this_test.go new file mode 100644 index 000000000..64bfe4305 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_more_like_this_test.go @@ -0,0 +1,91 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
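+
+// The tests below marshal each query's Source() output with encoding/json and
+// compare it against a fixed JSON string; a minimal sketch of the expected
+// MoreLikeThisQuery shape:
+//
+// {"mlt":{"fields":["message"],"like":["Golang topic"]}}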
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMoreLikeThisQuerySourceWithLikeText(t *testing.T) { + q := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatal(err) + } + got := string(data) + expected := `{"mlt":{"fields":["message"],"like":["Golang topic"]}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMoreLikeThisQuerySourceWithLikeAndUnlikeItems(t *testing.T) { + q := NewMoreLikeThisQuery() + q = q.LikeItems( + NewMoreLikeThisQueryItem().Id("1"), + NewMoreLikeThisQueryItem().Index(testIndexName2).Type("comment").Id("2").Routing("routing_id"), + ) + q = q.IgnoreLikeItems(NewMoreLikeThisQueryItem().Id("3")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatal(err) + } + got := string(data) + expected := `{"mlt":{"like":[{"_id":"1"},{"_id":"2","_index":"elastic-test2","_routing":"routing_id","_type":"comment"}],"unlike":[{"_id":"3"}]}}` + if got != expected { + t.Fatalf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMoreLikeThisQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another Golang topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Common query + mltq := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message") + res, err := client.Search(). + Index(testIndexName). + Query(mltq). + Do() + if err != nil { + t.Fatal(err) + } + if res.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go new file mode 100644 index 000000000..b9f74a0d3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_multi_match.go @@ -0,0 +1,275 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strings" +) + +// MultiMatchQuery builds on the MatchQuery to allow multi-field queries. 
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html
+type MultiMatchQuery struct {
+ text interface{}
+ fields []string
+ fieldBoosts map[string]*float64
+ typ string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix
+ operator string // AND or OR
+ analyzer string
+ boost *float64
+ slop *int
+ fuzziness string
+ prefixLength *int
+ maxExpansions *int
+ minimumShouldMatch string
+ rewrite string
+ fuzzyRewrite string
+ tieBreaker *float64
+ lenient *bool
+ cutoffFrequency *float64
+ zeroTermsQuery string
+ queryName string
+}
+
+// NewMultiMatchQuery creates and initializes a new MultiMatchQuery.
+func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery {
+ q := &MultiMatchQuery{
+ text: text,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float64),
+ }
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+// Field adds a field to run the multi match against.
+func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+// FieldWithBoost adds a field to run the multi match against with a specific boost.
+func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+// Type can be "best_fields", "boolean", "most_fields", "cross_fields",
+// "phrase", or "phrase_prefix".
+func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery {
+ var zero = float64(0.0)
+ var one = float64(1.0)
+
+ switch strings.ToLower(typ) {
+ default: // best_fields / boolean
+ q.typ = "best_fields"
+ q.tieBreaker = &zero
+ case "most_fields":
+ q.typ = "most_fields"
+ q.tieBreaker = &one
+ case "cross_fields":
+ q.typ = "cross_fields"
+ q.tieBreaker = &zero
+ case "phrase":
+ q.typ = "phrase"
+ q.tieBreaker = &zero
+ case "phrase_prefix":
+ q.typ = "phrase_prefix"
+ q.tieBreaker = &zero
+ }
+ return q
+}
+
+// Operator sets the operator to use when using boolean query.
+// It can be either AND or OR (default).
+func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery {
+ q.operator = operator
+ return q
+}
+
+// Analyzer sets the analyzer to use explicitly. It defaults to the field's
+// explicit mapping config or, if not set, the default search analyzer.
+func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery {
+ q.boost = &boost
+ return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery {
+ q.slop = &slop
+ return q
+}
+
+// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type.
+// It defaults to "AUTO".
+func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+// PrefixLength for the fuzzy process.
+func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+// MaxExpansions is the number of term expansions to use when using fuzzy
+// or prefix type query. It defaults to unbounded so it's recommended
+// to set it to a reasonable value for faster execution.
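+//
+// A minimal sketch bounding expansions for a phrase_prefix query:
+//
+// q := NewMultiMatchQuery("qui", "subject", "message").Type("phrase_prefix").MaxExpansions(10)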
+func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery { + q.maxExpansions = &maxExpansions + return q +} + +// MinimumShouldMatch represents the minimum number of optional should clauses +// to match. +func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery { + q.rewrite = rewrite + return q +} + +func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery { + q.fuzzyRewrite = fuzzyRewrite + return q +} + +// TieBreaker for "best-match" disjunction queries (OR queries). +// The tie breaker capability allows documents that match more than one +// query clause (in this case on more than one field) to be scored better +// than documents that match only the best of the fields, without confusing +// this with the better case of two distinct matches in the multiple fields. +// +// A tie-breaker value of 1.0 is interpreted as a signal to score queries as +// "most-match" queries where all matching query clauses are considered for scoring. +func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery { + q.tieBreaker = &tieBreaker + return q +} + +// Lenient indicates whether format based failures will be ignored. +func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery { + q.lenient = &lenient + return q +} + +// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1) +// representing the maximum threshold of a terms document frequency to be +// considered a low frequency term. +func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery { + q.cutoffFrequency = &cutoff + return q +} + +// ZeroTermsQuery can be "all" or "none". +func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery { + q.zeroTermsQuery = zeroTermsQuery + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. 
+func (q *MultiMatchQuery) Source() (interface{}, error) { + // + // { + // "multi_match" : { + // "query" : "this is a test", + // "fields" : [ "subject", "message" ] + // } + // } + + source := make(map[string]interface{}) + + multiMatch := make(map[string]interface{}) + source["multi_match"] = multiMatch + + multiMatch["query"] = q.text + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + multiMatch["fields"] = fields + } + + if q.typ != "" { + multiMatch["type"] = q.typ + } + + if q.operator != "" { + multiMatch["operator"] = q.operator + } + if q.analyzer != "" { + multiMatch["analyzer"] = q.analyzer + } + if q.boost != nil { + multiMatch["boost"] = *q.boost + } + if q.slop != nil { + multiMatch["slop"] = *q.slop + } + if q.fuzziness != "" { + multiMatch["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + multiMatch["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + multiMatch["max_expansions"] = *q.maxExpansions + } + if q.minimumShouldMatch != "" { + multiMatch["minimum_should_match"] = q.minimumShouldMatch + } + if q.rewrite != "" { + multiMatch["rewrite"] = q.rewrite + } + if q.fuzzyRewrite != "" { + multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.tieBreaker != nil { + multiMatch["tie_breaker"] = *q.tieBreaker + } + if q.lenient != nil { + multiMatch["lenient"] = *q.lenient + } + if q.cutoffFrequency != nil { + multiMatch["cutoff_frequency"] = *q.cutoffFrequency + } + if q.zeroTermsQuery != "" { + multiMatch["zero_terms_query"] = q.zeroTermsQuery + } + if q.queryName != "" { + multiMatch["_name"] = q.queryName + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_multi_match_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_multi_match_test.go new file mode 100644 index 000000000..508726bed --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_multi_match_test.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestMultiMatchQuery(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryBestFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("best_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"best_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryMostFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("most_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":1,"type":"most_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryCrossFields(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("cross_fields") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"cross_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryPhrase(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryPhrasePrefix(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase_prefix") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase_prefix"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestMultiMatchQueryBestFieldsWithCustomTieBreaker(t *testing.T) { + q := NewMultiMatchQuery("this is a test", "subject", "message"). + Type("best_fields"). 
+ TieBreaker(0.3) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0.3,"type":"best_fields"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_nested.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_nested.go new file mode 100644 index 000000000..0a598f8bf --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_nested.go @@ -0,0 +1,85 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedQuery allows to query nested objects / docs. +// The query is executed against the nested objects / docs as if they were +// indexed as separate docs (they are, internally) and resulting in the +// root parent doc (or parent nested mapping). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-nested-query.html +type NestedQuery struct { + query Query + path string + scoreMode string + boost *float64 + queryName string + innerHit *InnerHit +} + +// NewNestedQuery creates and initializes a new NestedQuery. +func NewNestedQuery(path string, query Query) *NestedQuery { + return &NestedQuery{path: path, query: query} +} + +// ScoreMode specifies the score mode. +func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery { + q.scoreMode = scoreMode + return q +} + +// Boost sets the boost for this query. +func (q *NestedQuery) Boost(boost float64) *NestedQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *NestedQuery) QueryName(queryName string) *NestedQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this nested query +// and reusing the defined path and query. +func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the query. +func (q *NestedQuery) Source() (interface{}, error) { + query := make(map[string]interface{}) + nq := make(map[string]interface{}) + query["nested"] = nq + + src, err := q.query.Source() + if err != nil { + return nil, err + } + nq["query"] = src + + nq["path"] = q.path + + if q.scoreMode != "" { + nq["score_mode"] = q.scoreMode + } + if q.boost != nil { + nq["boost"] = *q.boost + } + if q.queryName != "" { + nq["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + nq["inner_hits"] = src + } + return query, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_nested_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_nested_test.go new file mode 100644 index 000000000..b068c59b1 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_nested_test.go @@ -0,0 +1,52 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestNestedQuery(t *testing.T) { + bq := NewBoolQuery() + bq = bq.Must(NewTermQuery("obj1.name", "blue")) + bq = bq.Must(NewRangeQuery("obj1.count").Gt(5)) + q := NewNestedQuery("obj1", bq).QueryName("qname") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"nested":{"_name":"qname","path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestNestedQueryWithInnerHit(t *testing.T) { + bq := NewBoolQuery() + bq = bq.Must(NewTermQuery("obj1.name", "blue")) + bq = bq.Must(NewRangeQuery("obj1.count").Gt(5)) + q := NewNestedQuery("obj1", bq) + q = q.QueryName("qname") + q = q.InnerHit(NewInnerHit().Name("comments").Query(NewTermQuery("user", "olivere"))) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"nested":{"_name":"qname","inner_hits":{"name":"comments","query":{"term":{"user":"olivere"}}},"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_not.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_not.go new file mode 100644 index 000000000..7a1ee8e08 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_not.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NotQuery filters out matched documents using a query. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-not-query.html +type NotQuery struct { + filter Query + queryName string +} + +// NewNotQuery creates and initializes a new NotQuery. +func NewNotQuery(filter Query) *NotQuery { + return &NotQuery{ + filter: filter, + } +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *NotQuery) QueryName(queryName string) *NotQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. 
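+//
+// A minimal sketch of the emitted shape, matching the tests below:
+//
+// {"not":{"query":{"term":{"user":"olivere"}}}}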
+func (q *NotQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["not"] = params + + src, err := q.filter.Source() + if err != nil { + return nil, err + } + params["query"] = src + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_not_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_not_test.go new file mode 100644 index 000000000..4c4f1c0ab --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_not_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestNotQuery(t *testing.T) { + f := NewNotQuery(NewTermQuery("user", "olivere")) + src, err := f.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"not":{"query":{"term":{"user":"olivere"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestNotQueryWithParams(t *testing.T) { + postDateFilter := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01") + f := NewNotQuery(postDateFilter) + f = f.QueryName("MyQueryName") + src, err := f.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"not":{"_name":"MyQueryName","query":{"range":{"postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_prefix.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_prefix.go new file mode 100644 index 000000000..1628ba8cc --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_prefix.go @@ -0,0 +1,67 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PrefixQuery matches documents that have fields containing terms +// with a specified prefix (not analyzed). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-prefix-query.html +type PrefixQuery struct { + name string + prefix string + boost *float64 + rewrite string + queryName string +} + +// NewPrefixQuery creates and initializes a new PrefixQuery. +func NewPrefixQuery(name string, prefix string) *PrefixQuery { + return &PrefixQuery{name: name, prefix: prefix} +} + +// Boost sets the boost for this query. +func (q *PrefixQuery) Boost(boost float64) *PrefixQuery { + q.boost = &boost + return q +} + +func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. 
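+//
+// Setting a name (or a boost/rewrite) switches Source from the compact form
+// {"prefix":{"user":"ki"}} to the expanded object form; a minimal sketch:
+//
+// q := NewPrefixQuery("user", "ki").QueryName("my_query_name")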
+func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *PrefixQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["prefix"] = query + + if q.boost == nil && q.rewrite == "" && q.queryName == "" { + query[q.name] = q.prefix + } else { + subQuery := make(map[string]interface{}) + subQuery["prefix"] = q.prefix + if q.boost != nil { + subQuery["boost"] = *q.boost + } + if q.rewrite != "" { + subQuery["rewrite"] = q.rewrite + } + if q.queryName != "" { + subQuery["_name"] = q.queryName + } + query[q.name] = subQuery + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_prefix_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_prefix_test.go new file mode 100644 index 000000000..ce1b74e41 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_prefix_test.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPrefixQuery(t *testing.T) { + q := NewPrefixQuery("user", "ki") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"prefix":{"user":"ki"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPrefixQueryWithOptions(t *testing.T) { + q := NewPrefixQuery("user", "ki") + q = q.QueryName("my_query_name") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"prefix":{"user":{"_name":"my_query_name","prefix":"ki"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_query_string.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_query_string.go new file mode 100644 index 000000000..53e4f344f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_query_string.go @@ -0,0 +1,349 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// QueryStringQuery uses the query parser in order to parse its content. 
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html
+type QueryStringQuery struct {
+ queryString string
+ defaultField string
+ defaultOperator string
+ analyzer string
+ quoteAnalyzer string
+ quoteFieldSuffix string
+ autoGeneratePhraseQueries *bool
+ allowLeadingWildcard *bool
+ lowercaseExpandedTerms *bool
+ enablePositionIncrements *bool
+ analyzeWildcard *bool
+ locale string
+ boost *float64
+ fuzziness string
+ fuzzyPrefixLength *int
+ fuzzyMaxExpansions *int
+ fuzzyRewrite string
+ phraseSlop *int
+ fields []string
+ fieldBoosts map[string]*float64
+ useDisMax *bool
+ tieBreaker *float64
+ rewrite string
+ minimumShouldMatch string
+ lenient *bool
+ queryName string
+ timeZone string
+ maxDeterminizedStates *int
+}
+
+// NewQueryStringQuery creates and initializes a new QueryStringQuery.
+func NewQueryStringQuery(queryString string) *QueryStringQuery {
+ return &QueryStringQuery{
+ queryString: queryString,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float64),
+ }
+}
+
+// DefaultField specifies the field to run against when no prefix field
+// is specified. Only relevant when no fields are explicitly added for the
+// query string to run against.
+func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery {
+ q.defaultField = defaultField
+ return q
+}
+
+// Field adds a field to run the query string against.
+func (q *QueryStringQuery) Field(field string) *QueryStringQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+// FieldWithBoost adds a field to run the query string against with a specific boost.
+func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+// UseDisMax specifies whether to combine queries using dis max or boolean
+// query when more than one field is used with the query string. Defaults
+// to dismax (true).
+func (q *QueryStringQuery) UseDisMax(useDisMax bool) *QueryStringQuery {
+ q.useDisMax = &useDisMax
+ return q
+}
+
+// TieBreaker is used when more than one field is used with the query string,
+// and combined queries are using dismax.
+func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+// DefaultOperator sets the boolean operator of the query parser used to
+// parse the query string.
+//
+// In default mode (OR) terms without any modifiers
+// are considered optional, e.g. "capital of Hungary" is equal to
+// "capital OR of OR Hungary".
+//
+// In AND mode, terms are considered to be in conjunction. The above mentioned
+// query is then parsed as "capital AND of AND Hungary".
+func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery {
+ q.defaultOperator = operator
+ return q
+}
+
+// Analyzer is an optional analyzer used to analyze the query string.
+// Note, if a field has search analyzer defined for it, then it will be used
+// automatically. Defaults to the smart search analyzer.
+func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// QuoteAnalyzer is an optional analyzer to be used to analyze the query string
+// for phrase searches. Note, if a field has search analyzer defined for it,
+// then it will be used automatically. Defaults to the smart search analyzer.
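+//
+// A minimal sketch, assuming a "standard" analyzer is available in the
+// target index:
+//
+// q := NewQueryStringQuery(`"capital of Hungary"`).QuoteAnalyzer("standard")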
+func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery {
+ q.quoteAnalyzer = quoteAnalyzer
+ return q
+}
+
+// AutoGeneratePhraseQueries indicates whether or not phrase queries will
+// be automatically generated when the analyzer returns more than one term
+// from whitespace delimited text. Set to false if phrase queries should only
+// be generated when surrounded by double quotes.
+func (q *QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) *QueryStringQuery {
+ q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries
+ return q
+}
+
+// MaxDeterminizedState protects against too-difficult regular expression queries.
+func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery {
+ q.maxDeterminizedStates = &maxDeterminizedStates
+ return q
+}
+
+// AllowLeadingWildcard specifies whether leading wildcards should be allowed
+// or not (defaults to true).
+func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery {
+ q.allowLeadingWildcard = &allowLeadingWildcard
+ return q
+}
+
+// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
+// and range queries are automatically lower-cased or not. Default is true.
+func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery {
+ q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return q
+}
+
+// EnablePositionIncrements indicates whether to enable position increments
+// in result query. Defaults to true.
+//
+// When set, result phrase and multi-phrase queries will be aware of position
+// increments. Useful when e.g. a StopFilter increases the position increment
+// of the token that follows an omitted token.
+func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery {
+ q.enablePositionIncrements = &enablePositionIncrements
+ return q
+}
+
+// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO".
+func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries.
+// Default is 1.
+func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery {
+ q.fuzzyPrefixLength = &fuzzyPrefixLength
+ return q
+}
+
+// FuzzyMaxExpansions sets the maximum number of term expansions for fuzzy queries.
+func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery {
+ q.fuzzyMaxExpansions = &fuzzyMaxExpansions
+ return q
+}
+
+// FuzzyRewrite sets the rewrite method used by fuzzy queries.
+func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+// PhraseSlop sets the default slop for phrases. If zero, then exact matches
+// are required. Default value is zero.
+func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery {
+ q.phraseSlop = &phraseSlop
+ return q
+}
+
+// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
+func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery {
+ q.analyzeWildcard = &analyzeWildcard
+ return q
+}
+
+// Rewrite sets the rewrite method for multi-term queries.
+func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// MinimumShouldMatch sets the minimum number of optional should clauses to match.
+func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery { + q.boost = &boost + return q +} + +// QuoteFieldSuffix is an optional field name suffix to automatically +// try and add to the field searched when using quoted text. +func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery { + q.quoteFieldSuffix = quoteFieldSuffix + return q +} + +// Lenient indicates whether the query string parser should be lenient +// when parsing field values. It defaults to the index setting and if not +// set, defaults to false. +func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery { + q.lenient = &lenient + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery { + q.queryName = queryName + return q +} + +func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery { + q.locale = locale + return q +} + +// TimeZone can be used to automatically adjust to/from fields using a +// timezone. Only used with date fields, of course. +func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery { + q.timeZone = timeZone + return q +} + +// Source returns JSON for the query. +func (q *QueryStringQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["query_string"] = query + + query["query"] = q.queryString + + if q.defaultField != "" { + query["default_field"] = q.defaultField + } + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.tieBreaker != nil { + query["tie_breaker"] = *q.tieBreaker + } + if q.useDisMax != nil { + query["use_dis_max"] = *q.useDisMax + } + if q.defaultOperator != "" { + query["default_operator"] = q.defaultOperator + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.quoteAnalyzer != "" { + query["quote_analyzer"] = q.quoteAnalyzer + } + if q.autoGeneratePhraseQueries != nil { + query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries + } + if q.maxDeterminizedStates != nil { + query["max_determinized_states"] = *q.maxDeterminizedStates + } + if q.allowLeadingWildcard != nil { + query["allow_leading_wildcard"] = *q.allowLeadingWildcard + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.enablePositionIncrements != nil { + query["enable_position_increments"] = *q.enablePositionIncrements + } + if q.fuzziness != "" { + query["fuzziness"] = q.fuzziness + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.fuzzyPrefixLength != nil { + query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength + } + if q.fuzzyMaxExpansions != nil { + query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions + } + if q.fuzzyRewrite != "" { + query["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.phraseSlop != nil { + query["phrase_slop"] = *q.phraseSlop + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.rewrite != "" { + query["rewrite"] = q.rewrite + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.quoteFieldSuffix != "" { + 
query["quote_field_suffix"] = q.quoteFieldSuffix + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.timeZone != "" { + query["time_zone"] = q.timeZone + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_query_string_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_query_string_test.go new file mode 100644 index 000000000..4d766124a --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_query_string_test.go @@ -0,0 +1,28 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestQueryStringQuery(t *testing.T) { + q := NewQueryStringQuery(`this AND that OR thus`) + q = q.DefaultField("content") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query_string":{"default_field":"content","query":"this AND that OR thus"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_range.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_range.go new file mode 100644 index 000000000..f688c25bd --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_range.go @@ -0,0 +1,145 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RangeQuery matches documents with fields that have terms within a certain range. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html +type RangeQuery struct { + name string + from interface{} + to interface{} + timeZone string + includeLower bool + includeUpper bool + boost *float64 + queryName string + format string +} + +// NewRangeQuery creates and initializes a new RangeQuery. +func NewRangeQuery(name string) *RangeQuery { + return &RangeQuery{name: name, includeLower: true, includeUpper: true} +} + +// From indicates the from part of the RangeQuery. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) From(from interface{}) *RangeQuery { + q.from = from + return q +} + +// Gt indicates a greater-than value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gt(from interface{}) *RangeQuery { + q.from = from + q.includeLower = false + return q +} + +// Gte indicates a greater-than-or-equal value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gte(from interface{}) *RangeQuery { + q.from = from + q.includeLower = true + return q +} + +// To indicates the to part of the RangeQuery. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) To(to interface{}) *RangeQuery { + q.to = to + return q +} + +// Lt indicates a less-than value for the to part. +// Use nil to indicate an unbounded to part. 
+func (q *RangeQuery) Lt(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = false + return q +} + +// Lte indicates a less-than-or-equal value for the to part. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) Lte(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = true + return q +} + +// IncludeLower indicates whether the lower bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery { + q.includeLower = includeLower + return q +} + +// IncludeUpper indicates whether the upper bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery { + q.includeUpper = includeUpper + return q +} + +// Boost sets the boost for this query. +func (q *RangeQuery) Boost(boost float64) *RangeQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *RangeQuery) QueryName(queryName string) *RangeQuery { + q.queryName = queryName + return q +} + +// TimeZone is used for date fields. In that case, we can adjust the +// from/to fields using a timezone. +func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery { + q.timeZone = timeZone + return q +} + +// Format is used for date fields. In that case, we can set the format +// to be used instead of the mapper format. +func (q *RangeQuery) Format(format string) *RangeQuery { + q.format = format + return q +} + +// Source returns JSON for the query. +func (q *RangeQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + rangeQ := make(map[string]interface{}) + source["range"] = rangeQ + + params := make(map[string]interface{}) + rangeQ[q.name] = params + + params["from"] = q.from + params["to"] = q.to + if q.timeZone != "" { + params["time_zone"] = q.timeZone + } + if q.format != "" { + params["format"] = q.format + } + params["include_lower"] = q.includeLower + params["include_upper"] = q.includeUpper + + if q.boost != nil { + rangeQ["boost"] = *q.boost + } + + if q.queryName != "" { + rangeQ["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_range_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_range_test.go new file mode 100644 index 000000000..126bb16f2 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_range_test.go @@ -0,0 +1,68 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestRangeQuery(t *testing.T) { + q := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01") + q = q.QueryName("my_query") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"_name":"my_query","postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeQueryWithTimeZone(t *testing.T) { + q := NewRangeQuery("born"). + Gte("2012-01-01"). + Lte("now"). 
+ TimeZone("+1:00") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":true,"time_zone":"+1:00","to":"now"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRangeQueryWithFormat(t *testing.T) { + q := NewRangeQuery("born"). + Gte("2012/01/01"). + Lte("now"). + Format("yyyy/MM/dd") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"range":{"born":{"format":"yyyy/MM/dd","from":"2012/01/01","include_lower":true,"include_upper":true,"to":"now"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_regexp.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_regexp.go new file mode 100644 index 000000000..ecd9f7fe0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_regexp.go @@ -0,0 +1,82 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RegexpQuery allows you to use regular expression term queries. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html +type RegexpQuery struct { + name string + regexp string + flags string + boost *float64 + rewrite string + queryName string + maxDeterminizedStates *int +} + +// NewRegexpQuery creates and initializes a new RegexpQuery. +func NewRegexpQuery(name string, regexp string) *RegexpQuery { + return &RegexpQuery{name: name, regexp: regexp} +} + +// Flags sets the regexp flags. +func (q *RegexpQuery) Flags(flags string) *RegexpQuery { + q.flags = flags + return q +} + +// MaxDeterminizedStates protects against complex regular expressions. +func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery { + q.maxDeterminizedStates = &maxDeterminizedStates + return q +} + +// Boost sets the boost for this query. +func (q *RegexpQuery) Boost(boost float64) *RegexpQuery { + q.boost = &boost + return q +} + +func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON-serializable query data. 
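+//
+// A minimal sketch of the emitted shape, matching the tests below:
+//
+// {"regexp":{"name.first":{"value":"s.*y"}}}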
+func (q *RegexpQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["regexp"] = query + + x := make(map[string]interface{}) + x["value"] = q.regexp + if q.flags != "" { + x["flags"] = q.flags + } + if q.maxDeterminizedStates != nil { + x["max_determinized_states"] = *q.maxDeterminizedStates + } + if q.boost != nil { + x["boost"] = *q.boost + } + if q.rewrite != "" { + x["rewrite"] = q.rewrite + } + if q.queryName != "" { + x["name"] = q.queryName + } + query[q.name] = x + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_regexp_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_regexp_test.go new file mode 100644 index 000000000..f4dc2355b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_regexp_test.go @@ -0,0 +1,47 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestRegexpQuery(t *testing.T) { + q := NewRegexpQuery("name.first", "s.*y") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"regexp":{"name.first":{"value":"s.*y"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestRegexpQueryWithOptions(t *testing.T) { + q := NewRegexpQuery("name.first", "s.*y"). + Boost(1.2). + Flags("INTERSECTION|COMPLEMENT|EMPTY"). + QueryName("my_query_name") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"regexp":{"name.first":{"boost":1.2,"flags":"INTERSECTION|COMPLEMENT|EMPTY","name":"my_query_name","value":"s.*y"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_script.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_script.go new file mode 100644 index 000000000..3baa90574 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_script.go @@ -0,0 +1,51 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// ScriptQuery allows to define scripts as filters. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html +type ScriptQuery struct { + script *Script + queryName string +} + +// NewScriptQuery creates and initializes a new ScriptQuery. +func NewScriptQuery(script *Script) *ScriptQuery { + return &ScriptQuery{ + script: script, + } +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. 
+func (q *ScriptQuery) Source() (interface{}, error) { + if q.script == nil { + return nil, errors.New("ScriptQuery expected a script") + } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["script"] = params + + src, err := q.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_script_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_script_test.go new file mode 100644 index 000000000..e10510c10 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_script_test.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestScriptQuery(t *testing.T) { + q := NewScriptQuery(NewScript("doc['num1'.value > 1")) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"script":{"script":"doc['num1'.value \u003e 1"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestScriptQueryWithParams(t *testing.T) { + q := NewScriptQuery(NewScript("doc['num1'.value > 1")) + q = q.QueryName("MyQueryName") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"script":{"_name":"MyQueryName","script":"doc['num1'.value \u003e 1"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go new file mode 100644 index 000000000..fb0a2a9b9 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string.go @@ -0,0 +1,185 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strings" +) + +// SimpleQueryStringQuery is a query that uses the SimpleQueryParser +// to parse its context. Unlike the regular query_string query, +// the simple_query_string query will never throw an exception, +// and discards invalid parts of the query. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html +type SimpleQueryStringQuery struct { + queryText string + analyzer string + operator string + fields []string + fieldBoosts map[string]*float64 + minimumShouldMatch string + flags string + boost *float64 + lowercaseExpandedTerms *bool + lenient *bool + analyzeWildcard *bool + locale string + queryName string +} + +// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery. 
+func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery {
+	return &SimpleQueryStringQuery{
+		queryText:   text,
+		fields:      make([]string, 0),
+		fieldBoosts: make(map[string]*float64),
+	}
+}
+
+// Field adds a field to run the query against.
+func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery {
+	q.fields = append(q.fields, field)
+	return q
+}
+
+// FieldWithBoost adds a field to run the query against with a specific boost.
+func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery {
+	q.fields = append(q.fields, field)
+	q.fieldBoosts[field] = &boost
+	return q
+}
+
+// Boost sets the boost for this query.
+func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery {
+	q.boost = &boost
+	return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Analyzer specifies the analyzer to use for the query.
+func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery {
+	q.analyzer = analyzer
+	return q
+}
+
+// DefaultOperator specifies the default operator for the query.
+func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery {
+	q.operator = defaultOperator
+	return q
+}
+
+// Flags sets the flags for the query.
+func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery {
+	q.flags = flags
+	return q
+}
+
+// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
+// and range queries are automatically lower-cased or not. Default is true.
+func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery {
+	q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+	return q
+}
+
+// Locale specifies the locale to use for parsing the query text.
+func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery {
+	q.locale = locale
+	return q
+}
+
+// Lenient indicates whether the query string parser should be lenient
+// when parsing field values. It defaults to the index setting and if not
+// set, defaults to false.
+func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery {
+	q.lenient = &lenient
+	return q
+}
+
+// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
+func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery {
+	q.analyzeWildcard = &analyzeWildcard
+	return q
+}
+
+// MinimumShouldMatch specifies the minimum number (or percentage) of
+// clauses that must match.
+func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery {
+	q.minimumShouldMatch = minimumShouldMatch
+	return q
+}
+
+// Source returns JSON for the query.
+func (q *SimpleQueryStringQuery) Source() (interface{}, error) { + // { + // "simple_query_string" : { + // "query" : "\"fried eggs\" +(eggplant | potato) -frittata", + // "analyzer" : "snowball", + // "fields" : ["body^5","_all"], + // "default_operator" : "and" + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["simple_query_string"] = query + + query["query"] = q.queryText + + if len(q.fields) > 0 { + fields := make([]string, 0) + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.flags != "" { + query["flags"] = q.flags + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.operator != "" { + query["default_operator"] = strings.ToLower(q.operator) + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.boost != nil { + query["boost"] = *q.boost + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string_test.go new file mode 100644 index 000000000..f6be3e5bd --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_simple_query_string_test.go @@ -0,0 +1,86 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestSimpleQueryStringQuery(t *testing.T) {
+	q := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"simple_query_string":{"query":"\"fried eggs\" +(eggplant | potato) -frittata"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSimpleQueryStringQueryExec(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Only the first tweet matches both "Golang" and "Elasticsearch"
+	query := NewSimpleQueryStringQuery("+Golang +Elasticsearch")
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Query(query).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 1 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 1 {
+		t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+	}
+
+	for _, hit := range searchResult.Hits.Hits {
+		if hit.Index != testIndexName {
+			t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+		}
+		item := make(map[string]interface{})
+		err := json.Unmarshal(*hit.Source, &item)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_template_query.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_template_query.go
new file mode 100644
index 000000000..0611c3ea4
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_template_query.go
@@ -0,0 +1,84 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TemplateQuery is a query that accepts a query template and a
+// map of key/value pairs to fill in template parameters.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
+type TemplateQuery struct {
+	template     string
+	templateType string
+	vars         map[string]interface{}
+}
+
+// NewTemplateQuery creates and initializes a new TemplateQuery.
+func NewTemplateQuery(name string) *TemplateQuery {
+	return &TemplateQuery{
+		template: name,
+		vars:     make(map[string]interface{}),
+	}
+}
+
+// Template specifies the name of the template.
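+// The name set here (or via NewTemplateQuery) is serialized under a key
+// chosen by TemplateType. For example (names are illustrative):
+//
+//	q := NewTemplateQuery("indexedTemplate").
+//		TemplateType("id").
+//		Vars(map[string]interface{}{"template": "all"})
+//
+// serializes to {"template":{"id":"indexedTemplate","params":{"template":"all"}}}.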
+func (q *TemplateQuery) Template(name string) *TemplateQuery { + q.template = name + return q +} + +// TemplateType defines which kind of query we use. The values can be: +// inline, indexed, or file. If undefined, inline is used. +func (q *TemplateQuery) TemplateType(typ string) *TemplateQuery { + q.templateType = typ + return q +} + +// Var sets a single parameter pair. +func (q *TemplateQuery) Var(name string, value interface{}) *TemplateQuery { + q.vars[name] = value + return q +} + +// Vars sets parameters for the template query. +func (q *TemplateQuery) Vars(vars map[string]interface{}) *TemplateQuery { + q.vars = vars + return q +} + +// Source returns the JSON serializable content for the search. +func (q *TemplateQuery) Source() (interface{}, error) { + // { + // "template" : { + // "query" : {"match_{{template}}": {}}, + // "params" : { + // "template": "all" + // } + // } + // } + + query := make(map[string]interface{}) + + tmpl := make(map[string]interface{}) + query["template"] = tmpl + + // TODO(oe): Implementation differs from online documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html + var fieldname string + switch q.templateType { + case "file": // file + fieldname = "file" + case "indexed", "id": // indexed + fieldname = "id" + default: // inline + fieldname = "query" + } + + tmpl[fieldname] = q.template + if len(q.vars) > 0 { + tmpl["params"] = q.vars + } + + return query, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_template_query_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_template_query_test.go new file mode 100644 index 000000000..8f21ef9f0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_template_query_test.go @@ -0,0 +1,65 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTemplateQueryInlineTest(t *testing.T) { + q := NewTemplateQuery("\"match_{{template}}\": {}}\"").Vars(map[string]interface{}{"template": "all"}) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"template":{"params":{"template":"all"},"query":"\"match_{{template}}\": {}}\""}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTemplateQueryIndexedTest(t *testing.T) { + q := NewTemplateQuery("indexedTemplate"). + TemplateType("id"). + Vars(map[string]interface{}{"template": "all"}) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"template":{"id":"indexedTemplate","params":{"template":"all"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTemplateQueryFileTest(t *testing.T) { + q := NewTemplateQuery("storedTemplate"). + TemplateType("file"). 
+ Vars(map[string]interface{}{"template": "all"}) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"template":{"file":"storedTemplate","params":{"template":"all"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_term.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_term.go new file mode 100644 index 000000000..c20c5c66e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_term.go @@ -0,0 +1,58 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermQuery finds documents that contain the exact term specified +// in the inverted index. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html +type TermQuery struct { + name string + value interface{} + boost *float64 + queryName string +} + +// NewTermQuery creates and initializes a new TermQuery. +func NewTermQuery(name string, value interface{}) *TermQuery { + return &TermQuery{name: name, value: value} +} + +// Boost sets the boost for this query. +func (q *TermQuery) Boost(boost float64) *TermQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *TermQuery) QueryName(queryName string) *TermQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *TermQuery) Source() (interface{}, error) { + // {"term":{"name":"value"}} + source := make(map[string]interface{}) + tq := make(map[string]interface{}) + source["term"] = tq + + if q.boost == nil && q.queryName == "" { + tq[q.name] = q.value + } else { + subQ := make(map[string]interface{}) + subQ["value"] = q.value + if q.boost != nil { + subQ["boost"] = *q.boost + } + if q.queryName != "" { + subQ["_name"] = q.queryName + } + tq[q.name] = subQ + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_term_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_term_test.go new file mode 100644 index 000000000..17c8c9848 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_term_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermQuery(t *testing.T) { + q := NewTermQuery("user", "ki") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"term":{"user":"ki"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermQueryWithOptions(t *testing.T) { + q := NewTermQuery("user", "ki") + q = q.Boost(2.79) + q = q.QueryName("my_tq") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"term":{"user":{"_name":"my_tq","boost":2.79,"value":"ki"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_terms.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_terms.go new file mode 100644 index 000000000..a7e158859 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_terms.go @@ -0,0 +1,58 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsQuery filters documents that have fields that match any +// of the provided terms (not analyzed). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html +type TermsQuery struct { + name string + values []interface{} + queryName string + boost *float64 +} + +// NewTermsQuery creates and initializes a new TermsQuery. +func NewTermsQuery(name string, values ...interface{}) *TermsQuery { + q := &TermsQuery{ + name: name, + values: make([]interface{}, 0), + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +// Boost sets the boost for this query. +func (q *TermsQuery) Boost(boost float64) *TermsQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *TermsQuery) QueryName(queryName string) *TermsQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the term query. +func (q *TermsQuery) Source() (interface{}, error) { + // {"terms":{"name":["value1","value2"]}} + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["terms"] = params + params[q.name] = q.values + if q.boost != nil { + params["boost"] = *q.boost + } + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_terms_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_terms_test.go new file mode 100644 index 000000000..6de743d14 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_terms_test.go @@ -0,0 +1,46 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermsQuery(t *testing.T) { + q := NewTermsQuery("user", "ki") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms":{"user":["ki"]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestTermQuerysWithOptions(t *testing.T) { + q := NewTermsQuery("user", "ki", "ko") + q = q.Boost(2.79) + q = q.QueryName("my_tq") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"terms":{"_name":"my_tq","boost":2.79,"user":["ki","ko"]}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_type.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_type.go new file mode 100644 index 000000000..884d4ae7b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_type.go @@ -0,0 +1,26 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TypeQuery filters documents matching the provided document / mapping type. +// +// For details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html +type TypeQuery struct { + typ string +} + +func NewTypeQuery(typ string) *TypeQuery { + return &TypeQuery{typ: typ} +} + +// Source returns JSON for the query. +func (q *TypeQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["type"] = params + params["value"] = q.typ + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_type_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_type_test.go new file mode 100644 index 000000000..bde0ed3d3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_type_test.go @@ -0,0 +1,27 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTypeQuery(t *testing.T) { + q := NewTypeQuery("my_type") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"type":{"value":"my_type"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go new file mode 100644 index 000000000..127332da3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_wildcard.go @@ -0,0 +1,81 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// WildcardQuery matches documents that have fields matching a wildcard +// expression (not analyzed). Supported wildcards are *, which matches +// any character sequence (including the empty one), and ?, which matches +// any single character. Note this query can be slow, as it needs to iterate +// over many terms. In order to prevent extremely slow wildcard queries, +// a wildcard term should not start with one of the wildcards * or ?. +// The wildcard query maps to Lucene WildcardQuery. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html +type WildcardQuery struct { + name string + wildcard string + boost *float64 + rewrite string + queryName string +} + +// NewWildcardQuery creates and initializes a new WildcardQuery. +func NewWildcardQuery(name, wildcard string) *WildcardQuery { + return &WildcardQuery{ + name: name, + wildcard: wildcard, + } +} + +// Boost sets the boost for this query. +func (q *WildcardQuery) Boost(boost float64) *WildcardQuery { + q.boost = &boost + return q +} + +func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the name of this query. +func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable body of this query. +func (q *WildcardQuery) Source() (interface{}, error) { + // { + // "wildcard" : { + // "user" : { + // "wildcard" : "ki*y", + // "boost" : 1.0 + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["wildcard"] = query + + wq := make(map[string]interface{}) + query[q.name] = wq + + wq["wildcard"] = q.wildcard + + if q.boost != nil { + wq["boost"] = *q.boost + } + if q.rewrite != "" { + wq["rewrite"] = q.rewrite + } + if q.queryName != "" { + wq["_name"] = q.queryName + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_wildcard_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_wildcard_test.go new file mode 100644 index 000000000..5cd529aff --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_queries_wildcard_test.go @@ -0,0 +1,67 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic_test + +import ( + "encoding/json" + "testing" + + "gopkg.in/olivere/elastic.v3" +) + +func ExampleWildcardQuery() { + // Get a client to the local Elasticsearch instance. + client, err := elastic.NewClient() + if err != nil { + // Handle error + panic(err) + } + + // Define wildcard query + q := elastic.NewWildcardQuery("user", "oli*er?").Boost(1.2) + searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(q). 
// use wildcard query defined above
+		Do()                // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	_ = searchResult
+}
+
+func TestWildcardQuery(t *testing.T) {
+	q := elastic.NewWildcardQuery("user", "ki*y??")
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"wildcard":{"user":{"wildcard":"ki*y??"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestWildcardQueryWithBoost(t *testing.T) {
+	q := elastic.NewWildcardQuery("user", "ki*y??").Boost(1.2)
+	src, err := q.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"wildcard":{"user":{"boost":1.2,"wildcard":"ki*y??"}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_request.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_request.go
new file mode 100644
index 000000000..5fb476dd1
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_request.go
@@ -0,0 +1,153 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"strings"
+)
+
+// SearchRequest combines a search request and its
+// query details (see SearchSource).
+// It is used in combination with MultiSearch.
+type SearchRequest struct {
+	searchType string // default in ES is "query_then_fetch"
+	indices    []string
+	types      []string
+	routing    *string
+	preference *string
+	source     interface{}
+}
+
+// NewSearchRequest creates a new search request.
+func NewSearchRequest() *SearchRequest {
+	return &SearchRequest{
+		indices: make([]string, 0),
+		types:   make([]string, 0),
+	}
+}
+
+// SearchType must be one of "query_then_fetch", "query_and_fetch",
+// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch".
+// Use one of the constants defined via SearchType.
+func (r *SearchRequest) SearchType(searchType string) *SearchRequest {
+	r.searchType = searchType
+	return r
+}
+
+// SearchTypeDfsQueryThenFetch sets the search type to "dfs_query_then_fetch".
+func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest {
+	return r.SearchType("dfs_query_then_fetch")
+}
+
+// SearchTypeDfsQueryAndFetch sets the search type to "dfs_query_and_fetch".
+func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest {
+	return r.SearchType("dfs_query_and_fetch")
+}
+
+// SearchTypeQueryThenFetch sets the search type to "query_then_fetch".
+func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest {
+	return r.SearchType("query_then_fetch")
+}
+
+// SearchTypeQueryAndFetch sets the search type to "query_and_fetch".
+func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest {
+	return r.SearchType("query_and_fetch")
+}
+
+// SearchTypeScan sets the search type to "scan".
+func (r *SearchRequest) SearchTypeScan() *SearchRequest {
+	return r.SearchType("scan")
+}
+
+// SearchTypeCount sets the search type to "count".
+func (r *SearchRequest) SearchTypeCount() *SearchRequest {
+	return r.SearchType("count")
+}
+
+// Index specifies the indices to run the search request on.
+func (r *SearchRequest) Index(indices ...string) *SearchRequest {
+	r.indices = append(r.indices, indices...)
+	return r
+}
+
+// HasIndices returns true if at least one index has been set.
+func (r *SearchRequest) HasIndices() bool {
+	return len(r.indices) > 0
+}
+
+// Type specifies the mapping types to restrict the search to.
+func (r *SearchRequest) Type(types ...string) *SearchRequest {
+	r.types = append(r.types, types...)
+	return r
+}
+
+// Routing specifies the routing value for the request.
+func (r *SearchRequest) Routing(routing string) *SearchRequest {
+	r.routing = &routing
+	return r
+}
+
+// Routings sets several routing values, joined by a comma.
+func (r *SearchRequest) Routings(routings ...string) *SearchRequest {
+	if routings != nil {
+		routings := strings.Join(routings, ",")
+		r.routing = &routings
+	} else {
+		r.routing = nil
+	}
+	return r
+}
+
+// Preference sets the preference of the request.
+func (r *SearchRequest) Preference(preference string) *SearchRequest {
+	r.preference = &preference
+	return r
+}
+
+// Source sets the body of the search request. A *SearchSource is
+// serialized; any other value is passed through as-is.
+func (r *SearchRequest) Source(source interface{}) *SearchRequest {
+	switch v := source.(type) {
+	case *SearchSource:
+		src, err := v.Source()
+		if err != nil {
+			// Do not do anything in case of an error
+			return r
+		}
+		r.source = src
+	default:
+		r.source = source
+	}
+	return r
+}
+
+// header is used by MultiSearch to get information about the search header
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) header() interface{} {
+	h := make(map[string]interface{})
+	if r.searchType != "" {
+		h["search_type"] = r.searchType
+	}
+
+	switch len(r.indices) {
+	case 0:
+	case 1:
+		h["index"] = r.indices[0]
+	default:
+		h["indices"] = r.indices
+	}
+
+	switch len(r.types) {
+	case 0:
+	case 1:
+		h["types"] = r.types[0]
+	default:
+		h["type"] = r.types
+	}
+
+	if r.routing != nil && *r.routing != "" {
+		h["routing"] = *r.routing
+	}
+
+	if r.preference != nil && *r.preference != "" {
+		h["preference"] = *r.preference
+	}
+
+	return h
+}
+
+// body is used by MultiSearch to get information about the search body
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) body() interface{} {
+	return r.source
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_request_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_request_test.go
new file mode 100644
index 000000000..c672b0705
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_request_test.go
@@ -0,0 +1,48 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+func TestSearchRequestIndex(t *testing.T) {
+	builder := NewSearchRequest().Index("test")
+	data, err := json.Marshal(builder.header())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"index":"test"}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchRequestIndices(t *testing.T) {
+	builder := NewSearchRequest().Index("test", "test2")
+	data, err := json.Marshal(builder.header())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"indices":["test","test2"]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchRequestHasIndices(t *testing.T) {
+	builder := NewSearchRequest()
+	if builder.HasIndices() {
+		t.Errorf("expected HasIndices to return false; got %v", builder.HasIndices())
+	}
+	builder = builder.Index("test", "test2")
+	if !builder.HasIndices() {
+		t.Errorf("expected HasIndices to return true; got %v", builder.HasIndices())
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_source.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_source.go
new file mode 100644
index 000000000..59c9fec67
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_source.go
@@ -0,0 +1,511 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+)
+
+// SearchSource enables users to build the search source.
+// It resembles the SearchSourceBuilder in Elasticsearch.
+type SearchSource struct {
+	query                    Query
+	postQuery                Query
+	from                     int
+	size                     int
+	explain                  *bool
+	version                  *bool
+	sorts                    []SortInfo
+	sorters                  []Sorter
+	trackScores              bool
+	minScore                 *float64
+	timeout                  string
+	terminateAfter           *int
+	fieldNames               []string
+	fieldDataFields          []string
+	scriptFields             []*ScriptField
+	fetchSourceContext       *FetchSourceContext
+	aggregations             map[string]Aggregation
+	highlight                *Highlight
+	globalSuggestText        string
+	suggesters               []Suggester
+	rescores                 []*Rescore
+	defaultRescoreWindowSize *int
+	indexBoosts              map[string]float64
+	stats                    []string
+	innerHits                map[string]*InnerHit
+}
+
+// NewSearchSource initializes a new SearchSource.
+func NewSearchSource() *SearchSource {
+	return &SearchSource{
+		from:            -1,
+		size:            -1,
+		trackScores:     false,
+		sorts:           make([]SortInfo, 0),
+		sorters:         make([]Sorter, 0),
+		fieldDataFields: make([]string, 0),
+		scriptFields:    make([]*ScriptField, 0),
+		aggregations:    make(map[string]Aggregation),
+		rescores:        make([]*Rescore, 0),
+		indexBoosts:     make(map[string]float64),
+		stats:           make([]string, 0),
+		innerHits:       make(map[string]*InnerHit),
+	}
+}
+
+// Query sets the query to use with this search source.
+func (s *SearchSource) Query(query Query) *SearchSource {
+	s.query = query
+	return s
+}
+
+// PostFilter will be executed after the query has been executed and
+// only affects the search hits, not the aggregations.
+// This filter is always executed as the last filtering mechanism.
+func (s *SearchSource) PostFilter(postFilter Query) *SearchSource {
+	s.postQuery = postFilter
+	return s
+}
+
+// From index to start the search from. Defaults to 0.
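+// For example, a sketch of requesting the third page of ten hits each
+// (the page size is illustrative):
+//
+//	src := NewSearchSource().Query(NewMatchAllQuery()).From(20).Size(10)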
+func (s *SearchSource) From(from int) *SearchSource {
+	s.from = from
+	return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchSource) Size(size int) *SearchSource {
+	s.size = size
+	return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchSource) MinScore(minScore float64) *SearchSource {
+	s.minScore = &minScore
+	return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchSource) Explain(explain bool) *SearchSource {
+	s.explain = &explain
+	return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchSource) Version(version bool) *SearchSource {
+	s.version = &version
+	return s
+}
+
+// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms".
+func (s *SearchSource) Timeout(timeout string) *SearchSource {
+	s.timeout = timeout
+	return s
+}
+
+// TimeoutInMillis controls how many milliseconds a search is allowed
+// to take before it is canceled.
+func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
+	s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+	return s
+}
+
+// TerminateAfter allows the request to stop after the given number
+// of search hits are collected.
+func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource {
+	s.terminateAfter = &terminateAfter
+	return s
+}
+
+// Sort adds a sort order.
+func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
+	s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending})
+	return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
+	s.sorts = append(s.sorts, info)
+	return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
+	s.sorters = append(s.sorters, sorter...)
+	return s
+}
+
+// hasSort reports whether at least one sort order has been set.
+func (s *SearchSource) hasSort() bool {
+	return len(s.sorts) > 0 || len(s.sorters) > 0
+}
+
+// TrackScores is applied when sorting and controls if scores will be
+// tracked as well. Defaults to false.
+func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
+	s.trackScores = trackScores
+	return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
+	s.aggregations[name] = aggregation
+	return s
+}
+
+// DefaultRescoreWindowSize sets the rescore window size for rescores
+// that don't specify their window.
+func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
+	s.defaultRescoreWindowSize = &defaultRescoreWindowSize
+	return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
+	s.highlight = highlight
+	return s
+}
+
+// Highlighter returns the highlighter.
+func (s *SearchSource) Highlighter() *Highlight {
+	if s.highlight == nil {
+		s.highlight = NewHighlight()
+	}
+	return s.highlight
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchSource) GlobalSuggestText(text string) *SearchSource {
+	s.globalSuggestText = text
+	return s
+}
+
+// Suggester adds a suggester to the search.
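+// A short sketch (suggester name, field, and text are illustrative):
+//
+//	ts := NewTermSuggester("my-suggestions").Text("Goolang").Field("message")
+//	src := NewSearchSource().Query(NewMatchAllQuery()).Suggester(ts)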
+func (s *SearchSource) Suggester(suggester Suggester) *SearchSource {
+	s.suggesters = append(s.suggesters, suggester)
+	return s
+}
+
+// Rescorer adds a rescorer to the search.
+func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource {
+	s.rescores = append(s.rescores, rescore)
+	return s
+}
+
+// ClearRescorers removes all rescorers from the search.
+func (s *SearchSource) ClearRescorers() *SearchSource {
+	s.rescores = make([]*Rescore, 0)
+	return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource {
+	if s.fetchSourceContext == nil {
+		s.fetchSourceContext = NewFetchSourceContext(fetchSource)
+	} else {
+		s.fetchSourceContext.SetFetchSource(fetchSource)
+	}
+	return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource {
+	s.fetchSourceContext = fetchSourceContext
+	return s
+}
+
+// NoFields indicates that no fields should be loaded, resulting in only
+// id and type to be returned per hit.
+func (s *SearchSource) NoFields() *SearchSource {
+	s.fieldNames = make([]string, 0)
+	return s
+}
+
+// Field adds a single field to load and return (note, must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
+func (s *SearchSource) Field(fieldName string) *SearchSource {
+	if s.fieldNames == nil {
+		s.fieldNames = make([]string, 0)
+	}
+	s.fieldNames = append(s.fieldNames, fieldName)
+	return s
+}
+
+// Fields sets the fields to load and return as part of the search request.
+// If none are specified, the source of the document will be returned.
+func (s *SearchSource) Fields(fieldNames ...string) *SearchSource {
+	if s.fieldNames == nil {
+		s.fieldNames = make([]string, 0)
+	}
+	s.fieldNames = append(s.fieldNames, fieldNames...)
+	return s
+}
+
+// FieldDataField adds a single field to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) FieldDataField(fieldDataField string) *SearchSource {
+	s.fieldDataFields = append(s.fieldDataFields, fieldDataField)
+	return s
+}
+
+// FieldDataFields adds one or more fields to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) FieldDataFields(fieldDataFields ...string) *SearchSource {
+	s.fieldDataFields = append(s.fieldDataFields, fieldDataFields...)
+	return s
+}
+
+// ScriptField adds a single script field with the provided script.
+func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource {
+	s.scriptFields = append(s.scriptFields, scriptField)
+	return s
+}
+
+// ScriptFields adds one or more script fields with the provided scripts.
+func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource {
+	s.scriptFields = append(s.scriptFields, scriptFields...)
+	return s
+}
+
+// IndexBoost sets the boost that a specific index will receive when the
+// query is executed against it.
+func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource {
+	s.indexBoosts[index] = boost
+	return s
+}
+
+// Stats sets the stats groups this request will be aggregated under.
+func (s *SearchSource) Stats(statsGroup ...string) *SearchSource {
+	s.stats = append(s.stats, statsGroup...)
+	return s
+}
+
+// InnerHit adds an inner hit to return with the result.
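+// A short sketch mirroring the accompanying tests (names are illustrative):
+//
+//	src := NewSearchSource().Query(NewMatchAllQuery()).
+//		InnerHit("comments", NewInnerHit().Type("comment").Query(NewMatchQuery("user", "olivere"))).
+//		InnerHit("views", NewInnerHit().Path("view"))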
+func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource { + s.innerHits[name] = innerHit + return s +} + +// Source returns the serializable JSON for the source builder. +func (s *SearchSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if s.from != -1 { + source["from"] = s.from + } + if s.size != -1 { + source["size"] = s.size + } + if s.timeout != "" { + source["timeout"] = s.timeout + } + if s.terminateAfter != nil { + source["terminate_after"] = *s.terminateAfter + } + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + source["query"] = src + } + if s.postQuery != nil { + src, err := s.postQuery.Source() + if err != nil { + return nil, err + } + source["post_filter"] = src + } + if s.minScore != nil { + source["min_score"] = *s.minScore + } + if s.version != nil { + source["version"] = *s.version + } + if s.explain != nil { + source["explain"] = *s.explain + } + if s.fetchSourceContext != nil { + src, err := s.fetchSourceContext.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + + if s.fieldNames != nil { + switch len(s.fieldNames) { + case 1: + source["fields"] = s.fieldNames[0] + default: + source["fields"] = s.fieldNames + } + } + + if len(s.fieldDataFields) > 0 { + source["fielddata_fields"] = s.fieldDataFields + } + + if len(s.scriptFields) > 0 { + sfmap := make(map[string]interface{}) + for _, scriptField := range s.scriptFields { + src, err := scriptField.Source() + if err != nil { + return nil, err + } + sfmap[scriptField.FieldName] = src + } + source["script_fields"] = sfmap + } + + if len(s.sorters) > 0 { + sortarr := make([]interface{}, 0) + for _, sorter := range s.sorters { + src, err := sorter.Source() + if err != nil { + return nil, err + } + sortarr = append(sortarr, src) + } + source["sort"] = sortarr + } else if len(s.sorts) > 0 { + sortarr := make([]interface{}, 0) + for _, sort := range s.sorts { + src, err := sort.Source() + if err != nil { + return nil, err + } + sortarr = append(sortarr, src) + } + source["sort"] = sortarr + } + + if s.trackScores { + source["track_scores"] = s.trackScores + } + + if len(s.indexBoosts) > 0 { + source["indices_boost"] = s.indexBoosts + } + + if len(s.aggregations) > 0 { + aggsMap := make(map[string]interface{}) + for name, aggregate := range s.aggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + source["aggregations"] = aggsMap + } + + if s.highlight != nil { + src, err := s.highlight.Source() + if err != nil { + return nil, err + } + source["highlight"] = src + } + + if len(s.suggesters) > 0 { + suggesters := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + suggesters[s.Name()] = src + } + if s.globalSuggestText != "" { + suggesters["text"] = s.globalSuggestText + } + source["suggest"] = suggesters + } + + if len(s.rescores) > 0 { + // Strip empty rescores from request + rescores := make([]*Rescore, 0) + for _, r := range s.rescores { + if !r.IsEmpty() { + rescores = append(rescores, r) + } + } + + if len(rescores) == 1 { + rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize + src, err := rescores[0].Source() + if err != nil { + return nil, err + } + source["rescore"] = src + } else { + slice := make([]interface{}, 0) + for _, r := range rescores { + r.defaultRescoreWindowSize = s.defaultRescoreWindowSize + src, err := r.Source() + if err 
!= nil {
+					return nil, err
+				}
+				slice = append(slice, src)
+			}
+			source["rescore"] = slice
+		}
+	}
+
+	if len(s.stats) > 0 {
+		source["stats"] = s.stats
+	}
+
+	if len(s.innerHits) > 0 {
+		// Top-level inner hits
+		// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
+		// "inner_hits": {
+		//   "<inner_hits_name>": {
+		//     "<path|type>": {
+		//       "<paths-or-types>": {
+		//         <inner_hits_body>,
+		//         [,"inner_hits" : { [<sub_inner_hits>]+ } ]?
+		//       }
+		//     }
+		//   },
+		//   [,"<inner_hits_name_2>" : { ... } ]*
+		// }
+		m := make(map[string]interface{})
+		for name, hit := range s.innerHits {
+			if hit.path != "" {
+				src, err := hit.Source()
+				if err != nil {
+					return nil, err
+				}
+				path := make(map[string]interface{})
+				path[hit.path] = src
+				m[name] = map[string]interface{}{
+					"path": path,
+				}
+			} else if hit.typ != "" {
+				src, err := hit.Source()
+				if err != nil {
+					return nil, err
+				}
+				typ := make(map[string]interface{})
+				typ[hit.typ] = src
+				m[name] = map[string]interface{}{
+					"type": typ,
+				}
+			} else {
+				// TODO the Java client throws here, because either path or typ must be specified
+			}
+		}
+		source["inner_hits"] = m
+	}
+
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_source_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_source_test.go
new file mode 100644
index 000000000..b5ddf61af
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_source_test.go
@@ -0,0 +1,238 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestSearchSourceMatchAllQuery(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ)
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceNoFields(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).NoFields()
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":[],"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceFields(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).Fields("message", "tags")
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":["message","tags"],"query":{"match_all":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestSearchSourceFetchSourceDisabled(t *testing.T) {
+	matchAllQ := NewMatchAllQuery()
+	builder := NewSearchSource().Query(matchAllQ).FetchSource(false)
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := 
`{"_source":false,"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceFetchSourceByWildcards(t *testing.T) { + matchAllQ := NewMatchAllQuery() + fsc := NewFetchSourceContext(true).Include("obj1.*", "obj2.*").Exclude("*.description") + builder := NewSearchSource().Query(matchAllQ).FetchSourceContext(fsc) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"_source":{"excludes":["*.description"],"includes":["obj1.*","obj2.*"]},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceFieldDataFields(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ).FieldDataFields("test1", "test2") + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"fielddata_fields":["test1","test2"],"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceScriptFields(t *testing.T) { + matchAllQ := NewMatchAllQuery() + sf1 := NewScriptField("test1", NewScript("doc['my_field_name'].value * 2")) + sf2 := NewScriptField("test2", NewScript("doc['my_field_name'].value * factor").Param("factor", 3.1415927)) + builder := NewSearchSource().Query(matchAllQ).ScriptFields(sf1, sf2) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":"doc['my_field_name'].value * 2"},"test2":{"script":{"inline":"doc['my_field_name'].value * factor","params":{"factor":3.1415927}}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourcePostFilter(t *testing.T) { + matchAllQ := NewMatchAllQuery() + pf := NewTermQuery("tag", "important") + builder := NewSearchSource().Query(matchAllQ).PostFilter(pf) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"post_filter":{"term":{"tag":"important"}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceHighlight(t *testing.T) { + matchAllQ := NewMatchAllQuery() + hl := NewHighlight().Field("content") + builder := NewSearchSource().Query(matchAllQ).Highlight(hl) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"highlight":{"fields":{"content":{}}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceRescoring(t *testing.T) { + matchAllQ := NewMatchAllQuery() + rescorerQuery := NewMatchQuery("field1", "the quick brown fox").Type("phrase").Slop(2) + rescorer := NewQueryRescorer(rescorerQuery) + rescorer = rescorer.QueryWeight(0.7) + rescorer = rescorer.RescoreQueryWeight(1.2) + rescore := 
NewRescore().WindowSize(50).Rescorer(rescorer) + builder := NewSearchSource().Query(matchAllQ).Rescorer(rescore) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"query":{"match_all":{}},"rescore":{"query":{"query_weight":0.7,"rescore_query":{"match":{"field1":{"query":"the quick brown fox","slop":2,"type":"phrase"}}},"rescore_query_weight":1.2},"window_size":50}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceIndexBoost(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ).IndexBoost("index1", 1.4).IndexBoost("index2", 1.3) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"indices_boost":{"index1":1.4,"index2":1.3},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSearchSourceInnerHits(t *testing.T) { + matchAllQ := NewMatchAllQuery() + builder := NewSearchSource().Query(matchAllQ). + InnerHit("comments", NewInnerHit().Type("comment").Query(NewMatchQuery("user", "olivere"))). + InnerHit("views", NewInnerHit().Path("view")) + src, err := builder.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"inner_hits":{"comments":{"type":{"comment":{"query":{"match":{"user":{"query":"olivere"}}}}}},"views":{"path":{"view":{}}}},"query":{"match_all":{}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_suggester_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_suggester_test.go new file mode 100644 index 000000000..02c552af2 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_suggester_test.go @@ -0,0 +1,259 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + _ "encoding/json" + _ "net/http" + "testing" +) + +func TestTermSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + tsName := "my-suggestions" + ts := NewTermSuggester(tsName) + ts = ts.Text("Goolang") + ts = ts.Field("message") + + searchResult, err := client.Search(). 
+ Index(testIndexName). + Query(all). + Suggester(ts). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Suggest == nil { + t.Errorf("expected SearchResult.Suggest != nil; got nil") + } + mySuggestions, found := searchResult.Suggest[tsName] + if !found { + t.Errorf("expected to find SearchResult.Suggest[%s]; got false", tsName) + } + if mySuggestions == nil { + t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", tsName) + } + + if len(mySuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) + } + mySuggestion := mySuggestions[0] + if mySuggestion.Text != "goolang" { + t.Errorf("expected Text = 'goolang'; got %s", mySuggestion.Text) + } + if mySuggestion.Offset != 0 { + t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) + } + if mySuggestion.Length != 7 { + t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) + } + if len(mySuggestion.Options) != 1 { + t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) + } + myOption := mySuggestion.Options[0] + if myOption.Text != "golang" { + t.Errorf("expected Text = 'golang'; got %s", myOption.Text) + } + if myOption.Score == float64(0.0) { + t.Errorf("expected Score != 0.0; got %v", myOption.Score) + } + if myOption.Freq == 0 { + t.Errorf("expected Freq != 0; got %v", myOption.Freq) + } +} + +func TestPhraseSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + phraseSuggesterName := "my-suggestions" + ps := NewPhraseSuggester(phraseSuggesterName) + ps = ps.Text("Goolang") + ps = ps.Field("message") + + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Suggester(ps). 
+ Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Suggest == nil { + t.Errorf("expected SearchResult.Suggest != nil; got nil") + } + mySuggestions, found := searchResult.Suggest[phraseSuggesterName] + if !found { + t.Errorf("expected to find SearchResult.Suggest[%s]; got false", phraseSuggesterName) + } + if mySuggestions == nil { + t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", phraseSuggesterName) + } + + if len(mySuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) + } + mySuggestion := mySuggestions[0] + if mySuggestion.Text != "Goolang" { + t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text) + } + if mySuggestion.Offset != 0 { + t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) + } + if mySuggestion.Length != 7 { + t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) + } + /* + if len(mySuggestion.Options) != 1 { + t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) + } + myOption := mySuggestion.Options[0] + if myOption.Text != "golang" { + t.Errorf("expected Text = 'golang'; got %s", myOption.Text) + } + if myOption.Score == float64(0.0) { + t.Errorf("expected Score != 0.0; got %v", myOption.Score) + } + */ +} + +// TODO(oe): I get a "Completion suggester not supported" exception on 0.90.2?! +/* +func TestCompletionSuggester(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + + suggesterName := "my-suggestions" + cs := NewCompletionSuggester(suggesterName) + cs = cs.Text("Goolang") + cs = cs.Field("message") + + searchResult, err := client.Search(). + Index(testIndexName). + Query(&all). + Suggester(cs). 
+ Do()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Suggest == nil {
+ t.Errorf("expected SearchResult.Suggest != nil; got nil")
+ }
+ mySuggestions, found := searchResult.Suggest[suggesterName]
+ if !found {
+ t.Errorf("expected to find SearchResult.Suggest[%s]; got false", suggesterName)
+ }
+ if mySuggestions == nil {
+ t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName)
+ }
+
+ if len(mySuggestions) != 1 {
+ t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+ }
+ mySuggestion := mySuggestions[0]
+ if mySuggestion.Text != "Goolang" {
+ t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text)
+ }
+ if mySuggestion.Offset != 0 {
+ t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+ }
+ if mySuggestion.Length != 7 {
+ t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
+ }
+ if len(mySuggestion.Options) != 1 {
+ t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+ }
+ myOption := mySuggestion.Options[0]
+ if myOption.Text != "golang" {
+ t.Errorf("expected Text = 'golang'; got %s", myOption.Text)
+ }
+ if myOption.Score == float64(0.0) {
+ t.Errorf("expected Score != 0.0; got %v", myOption.Score)
+ }
+}
+//*/
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_template.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_template.go
new file mode 100644
index 000000000..229a2712b
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_template.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// PutTemplateService creates or updates a search template.
+// The documentation can be found at
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type PutTemplateService struct {
+ client *Client
+ pretty bool
+ id string
+ opType string
+ version *int
+ versionType string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewPutTemplateService creates a new PutTemplateService.
+func NewPutTemplateService(client *Client) *PutTemplateService {
+ return &PutTemplateService{
+ client: client,
+ }
+}
+
+// Id is the template ID.
+func (s *PutTemplateService) Id(id string) *PutTemplateService {
+ s.id = id
+ return s
+}
+
+// OpType is an explicit operation type.
+func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
+ s.opType = opType
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *PutTemplateService) Version(version int) *PutTemplateService {
+ s.version = &version
+ return s
+}
+
+// VersionType is a specific version type.
+func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
+ s.versionType = versionType
+ return s
+}
+
+// BodyJson is the document as a JSON serializable object.
+func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the document as a string.
+func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
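+// As an illustrative sketch (the id and values here are hypothetical, not
+// taken from the upstream docs): with Id("my-tmpl"), Version(2) and
+// OpType("create") set, buildURL would return the path
+// "/_search/template/my-tmpl" together with the query parameters
+// "version=2" and "op_type=create".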
+func (s *PutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *PutTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *PutTemplateService) Do() (*PutTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest("PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PutTemplateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PutTemplateResponse is the response of PutTemplateService.Do. +type PutTemplateResponse struct { + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_templates_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_templates_test.go new file mode 100644 index 000000000..3f8bbcb65 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_templates_test.go @@ -0,0 +1,98 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "testing" +) + +func TestSearchTemplatesLifecycle(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Template + tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}` + + // Create template + cresp, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do() + if err != nil { + t.Fatal(err) + } + if cresp == nil { + t.Fatalf("expected response != nil; got: %v", cresp) + } + if !cresp.Created { + t.Errorf("expected created = %v; got: %v", true, cresp.Created) + } + + // Get template + resp, err := client.GetTemplate().Id("elastic-test").Do() + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected response != nil; got: %v", resp) + } + if resp.Template == "" { + t.Errorf("expected template != %q; got: %q", "", resp.Template) + } + + // Delete template + dresp, err := client.DeleteTemplate().Id("elastic-test").Do() + if err != nil { + t.Fatal(err) + } + if dresp == nil { + t.Fatalf("expected response != nil; got: %v", dresp) + } + if !dresp.Found { + t.Fatalf("expected found = %v; got: %v", true, dresp.Found) + } +} + +func TestSearchTemplatesInlineQuery(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Run query with (inline) search template + // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html + tq := NewTemplateQuery(`{"match_{{template}}": {}}`).Var("template", "all") + resp, err := client.Search(testIndexName).Query(tq).Do() + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected response != nil; got: %v", resp) + } + if resp.Hits == nil { + t.Fatalf("expected response hits != nil; got: %v", resp.Hits) + } + if resp.Hits.TotalHits != 3 { + t.Fatalf("expected 3 hits; got: %d", resp.Hits.TotalHits) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_test.go new file mode 100644 index 000000000..43a6695ff --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/search_test.go @@ -0,0 +1,885 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + _ "net/http" + "reflect" + "testing" + "time" +) + +func TestSearchMatchAll(t *testing.T) { + //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) + client := setupTestClientAndCreateIndexAndAddDocs(t) + + // Match all should return all documents + searchResult, err := client.Search(). + Index(testIndexName). + Query(NewMatchAllQuery()). + Size(100). + Pretty(true). 
+ Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if got, want := searchResult.Hits.TotalHits, int64(12); got != want { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got) + } + if got, want := len(searchResult.Hits.Hits), 12; got != want { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func BenchmarkSearchMatchAll(b *testing.B) { + client := setupTestClientAndCreateIndexAndAddDocs(b) + + for n := 0; n < b.N; n++ { + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do() + if err != nil { + b.Fatal(err) + } + if searchResult.Hits == nil { + b.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 4 { + b.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 4, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 4 { + b.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 4, len(searchResult.Hits.Hits)) + } + } +} + +func TestSearchResultTotalHits(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + count, err := client.Count(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + all := NewMatchAllQuery() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do() + if err != nil { + t.Fatal(err) + } + + got := searchResult.TotalHits() + if got != count { + t.Fatalf("expected %d hits; got: %d", count, got) + } + + // No hits + searchResult = &SearchResult{} + got = searchResult.TotalHits() + if got != 0 { + t.Errorf("expected %d hits; got: %d", 0, got) + } +} + +func TestSearchResultEach(t *testing.T) { + client := setupTestClientAndCreateIndexAndAddDocs(t) + + all := NewMatchAllQuery() + searchResult, err := client.Search().Index(testIndexName).Query(all).Do() + if err != nil { + t.Fatal(err) + } + + // Iterate over non-ptr type + var aTweet tweet + count := 0 + for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { + count++ + _, ok := item.(tweet) + if !ok { + t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item)) + } + } + if count == 0 { + t.Errorf("expected to find some hits; got: %d", count) + } + + // Iterate over ptr-type + count = 0 + var aTweetPtr *tweet + for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) { + count++ + tw, ok := item.(*tweet) + if !ok { + t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item)) + } + if tw == nil { + t.Fatal("expected hit to not be nil") + } + } + if count == 0 { + t.Errorf("expected to find some hits; got: %d", count) + } + + // Does not iterate when no hits are found + searchResult = &SearchResult{Hits: nil} + count = 0 + for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { + count++ + _ = item + } + if count != 0 { + t.Errorf("expected to not find any hits; got: %d", count) + } + searchResult = &SearchResult{Hits: &SearchHits{Hits: make([]*SearchHit, 0)}} + count = 0 + for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { + count++ + _ = item + } + if count != 0 { + t.Errorf("expected to not find any 
hits; got: %d", count) + } +} + +func TestSearchSorting(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Sort("created", false). + Timeout("1s"). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestSearchSortingBySorters(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + SortBy(NewFieldSort("created").Desc(), NewScoreSort()). + Timeout("1s"). 
+ Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + item := make(map[string]interface{}) + err := json.Unmarshal(*hit.Source, &item) + if err != nil { + t.Fatal(err) + } + } +} + +func TestSearchSpecificFields(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Fields("message"). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + if hit.Source != nil { + t.Fatalf("expected SearchResult.Hits.Hit.Source to be nil; got: %q", hit.Source) + } + if hit.Fields == nil { + t.Fatal("expected SearchResult.Hits.Hit.Fields to be != nil") + } + field, found := hit.Fields["message"] + if !found { + t.Errorf("expected SearchResult.Hits.Hit.Fields[%s] to be found", "message") + } + fields, ok := field.([]interface{}) + if !ok { + t.Errorf("expected []interface{}; got: %v", reflect.TypeOf(fields)) + } + if len(fields) != 1 { + t.Errorf("expected a field with 1 entry; got: %d", len(fields)) + } + message, ok := fields[0].(string) + if !ok { + t.Errorf("expected a string; got: %v", reflect.TypeOf(fields[0])) + } + if message == "" { + t.Errorf("expected a message; got: %q", message) + } + } +} + +func TestSearchExplain(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling 
is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Match all should return all documents + all := NewMatchAllQuery() + searchResult, err := client.Search(). + Index(testIndexName). + Query(all). + Explain(true). + Timeout("1s"). + // Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + for _, hit := range searchResult.Hits.Hits { + if hit.Index != testIndexName { + t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) + } + if hit.Explanation == nil { + t.Fatal("expected search explanation") + } + if hit.Explanation.Value <= 0.0 { + t.Errorf("expected explanation value to be > 0.0; got: %v", hit.Explanation.Value) + } + if hit.Explanation.Description == "" { + t.Errorf("expected explanation description != %q; got: %q", "", hit.Explanation.Description) + } + } +} + +func TestSearchSource(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Set up the request JSON manually to pass to the search service via Source() + source := map[string]interface{}{ + "query": map[string]interface{}{ + "match_all": map[string]interface{}{}, + }, + } + + searchResult, err := client.Search(). + Index(testIndexName). + Source(source). 
// sets the JSON request + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } +} + +func TestSearchSearchSource(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Set up the search source manually and pass it to the search service via SearchSource() + ss := NewSearchSource().Query(NewMatchAllQuery()).From(0).Size(2) + + // One can use ss.Source() to get to the raw interface{} that will be used + // as the search request JSON by the SearchService. + + searchResult, err := client.Search(). + Index(testIndexName). + SearchSource(ss). // sets the SearchSource + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 2 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits)) + } +} + +func TestSearchInnerHitsOnHasChild(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Check for valid ES version + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.5.0" { + t.Skip("InnerHits feature is only available for Elasticsearch 1.5+") + return + } + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + comment2a := comment{User: "sandrae", Comment: "What does that even mean?"} + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + comment3a := comment{User: "nico", Comment: "You bet."} + comment3b := comment{User: "olivere", Comment: "It sure is."} + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + _, err = 
client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + bq := NewBoolQuery() + bq = bq.Must(NewMatchAllQuery()) + bq = bq.Filter(NewHasChildQuery("comment", NewMatchAllQuery()). + InnerHit(NewInnerHit().Name("comments"))) + + searchResult, err := client.Search(). + Index(testIndexName). + Query(bq). + Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 2 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 2, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 2 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits)) + } + + hit := searchResult.Hits.Hits[0] + if hit.Id != "t2" { + t.Fatalf("expected tweet %q; got: %q", "t2", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found := hit.InnerHits["comments"] + if !found { + t.Fatalf("expected inner hits for name %q", "comments") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "c2a" { + t.Fatalf("expected inner hit with id %q; got: %q", "c2a", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[1] + if hit.Id != "t3" { + t.Fatalf("expected tweet %q; got: %q", "t3", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["comments"] + if !found { + t.Fatalf("expected inner hits for name %q", "comments") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 2 { + t.Fatalf("expected %d inner hits; got: %d", 2, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "c3a" { + t.Fatalf("expected inner hit with id %q; got: %q", "c3a", innerHits.Hits.Hits[0].Id) + } + if innerHits.Hits.Hits[1].Id != "c3b" { + t.Fatalf("expected inner hit with id %q; got: %q", "c3b", innerHits.Hits.Hits[1].Id) + } +} + +func TestSearchInnerHitsOnHasParent(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Check for valid ES version + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion < "1.5.0" { + t.Skip("InnerHits feature is only available for Elasticsearch 1.5+") + return + } + + tweet1 := tweet{ + User: "olivere", Retweets: 108, + Message: "Welcome to Golang and Elasticsearch.", + Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), + } + tweet2 := 
tweet{ + User: "olivere", Retweets: 0, + Message: "Another unrelated topic.", + Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), + } + comment2a := comment{User: "sandrae", Comment: "What does that even mean?"} + tweet3 := tweet{ + User: "sandrae", Retweets: 12, + Message: "Cycling is fun.", + Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), + } + comment3a := comment{User: "nico", Comment: "You bet."} + comment3b := comment{User: "olivere", Comment: "It sure is."} + + // Add all documents + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + bq := NewBoolQuery() + bq = bq.Must(NewMatchAllQuery()) + bq = bq.Filter(NewHasParentQuery("tweet", NewMatchAllQuery()). + InnerHit(NewInnerHit().Name("tweets"))) + + searchResult, err := client.Search(). + Index(testIndexName). + Query(bq). + Pretty(true). + Do() + if err != nil { + t.Fatal(err) + } + if searchResult.Hits == nil { + t.Errorf("expected SearchResult.Hits != nil; got nil") + } + if searchResult.Hits.TotalHits != 3 { + t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) + } + if len(searchResult.Hits.Hits) != 3 { + t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) + } + + hit := searchResult.Hits.Hits[0] + if hit.Id != "c2a" { + t.Fatalf("expected tweet %q; got: %q", "c2a", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found := hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "t2" { + t.Fatalf("expected inner hit with id %q; got: %q", "t2", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[1] + if hit.Id != "c3a" { + t.Fatalf("expected tweet %q; got: %q", "c3a", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if 
innerHits.Hits.Hits[0].Id != "t3" { + t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id) + } + + hit = searchResult.Hits.Hits[2] + if hit.Id != "c3b" { + t.Fatalf("expected tweet %q; got: %q", "c3b", hit.Id) + } + if hit.InnerHits == nil { + t.Fatalf("expected inner hits; got: %v", hit.InnerHits) + } + if len(hit.InnerHits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) + } + innerHits, found = hit.InnerHits["tweets"] + if !found { + t.Fatalf("expected inner hits for name %q", "tweets") + } + if innerHits == nil || innerHits.Hits == nil { + t.Fatal("expected inner hits != nil") + } + if len(innerHits.Hits.Hits) != 1 { + t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) + } + if innerHits.Hits.Hits[0].Id != "t3" { + t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/setup_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/setup_test.go new file mode 100644 index 000000000..97af2bb27 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/setup_test.go @@ -0,0 +1,232 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "log" + "math/rand" + "os" + "time" +) + +const ( + testIndexName = "elastic-test" + testIndexName2 = "elastic-test2" + testMapping = ` +{ + "settings":{ + "number_of_shards":1, + "number_of_replicas":0 + }, + "mappings":{ + "_default_": { + "_timestamp": { + "enabled": true + }, + "_ttl": { + "enabled": true + } + }, + "tweet":{ + "properties":{ + "tags":{ + "type":"string" + }, + "location":{ + "type":"geo_point" + }, + "suggest_field":{ + "type":"completion", + "payloads":true + } + } + }, + "comment":{ + "_parent": { + "type": "tweet" + } + }, + "order":{ + "properties":{ + "article":{ + "type":"string" + }, + "manufacturer":{ + "type":"string", + "index" : "not_analyzed" + }, + "price":{ + "type":"float" + }, + "time":{ + "type":"date", + "format": "YYYY-MM-dd" + } + } + } + } +} +` +) + +type tweet struct { + User string `json:"user"` + Message string `json:"message"` + Retweets int `json:"retweets"` + Image string `json:"image,omitempty"` + Created time.Time `json:"created,omitempty"` + Tags []string `json:"tags,omitempty"` + Location string `json:"location,omitempty"` + Suggest *SuggestField `json:"suggest_field,omitempty"` +} + +func (t tweet) String() string { + return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets) +} + +type comment struct { + User string `json:"user"` + Comment string `json:"comment"` + Created time.Time `json:"created,omitempty"` +} + +func (c comment) String() string { + return fmt.Sprintf("comment{User:%q,Comment:%q}", c.User, c.Comment) +} + +type order struct { + Article string `json:"article"` + Manufacturer string `json:"manufacturer"` + Price float64 `json:"price"` + Time string `json:"time,omitempty"` +} + +func (o order) String() string { + return fmt.Sprintf("order{Article:%q,Manufacturer:%q,Price:%v,Time:%v}", o.Article, o.Manufacturer, o.Price, o.Time) +} + +func isTravis() bool { + return os.Getenv("TRAVIS") != "" +} + +func travisGoVersion() string { + return os.Getenv("TRAVIS_GO_VERSION") +} + +type logger interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + 
Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fail() + FailNow() + Log(args ...interface{}) + Logf(format string, args ...interface{}) +} + +func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) { + var err error + + client, err = NewClient(options...) + if err != nil { + t.Fatal(err) + } + + client.DeleteIndex(testIndexName).Do() + client.DeleteIndex(testIndexName2).Do() + + return client +} + +func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client { + client := setupTestClient(t, options...) + + // Create index + createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() + if err != nil { + t.Fatal(err) + } + if createIndex == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex) + } + + // Create second index + createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do() + if err != nil { + t.Fatal(err) + } + if createIndex2 == nil { + t.Errorf("expected result to be != nil; got: %v", createIndex2) + } + + return client +} + +func setupTestClientAndCreateIndexAndLog(t logger, options ...ClientOptionFunc) *Client { + return setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) +} + +func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client { + client := setupTestClientAndCreateIndex(t, options...) + + // Add tweets + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} + tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} + comment1 := comment{User: "nico", Comment: "You bet."} + + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do() + if err != nil { + t.Fatal(err) + } + + // Add orders + var orders []order + orders = append(orders, order{Article: "Apple MacBook", Manufacturer: "Apple", Price: 1290, Time: "2015-01-18"}) + orders = append(orders, order{Article: "Paper", Manufacturer: "Canon", Price: 100, Time: "2015-03-01"}) + orders = append(orders, order{Article: "Apple iPad", Manufacturer: "Apple", Price: 499, Time: "2015-04-12"}) + orders = append(orders, order{Article: "Dell XPS 13", Manufacturer: "Dell", Price: 1600, Time: "2015-04-18"}) + orders = append(orders, order{Article: "Apple Watch", Manufacturer: "Apple", Price: 349, Time: "2015-04-29"}) + orders = append(orders, order{Article: "Samsung TV", Manufacturer: "Samsung", Price: 790, Time: "2015-05-03"}) + orders = append(orders, order{Article: "Hoodie", Manufacturer: "h&m", Price: 49, Time: "2015-06-03"}) + orders = append(orders, order{Article: "T-Shirt", Manufacturer: "h&m", Price: 19, Time: "2015-06-18"}) + for i, o := range orders { + id := fmt.Sprintf("%d", i) + _, err = client.Index().Index(testIndexName).Type("order").Id(id).BodyJson(&o).Do() + if err != nil { + t.Fatal(err) + } + } + + // Flush + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + return client +} + +var letters = 
[]rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomString(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/sort.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/sort.go new file mode 100644 index 000000000..4c845c505 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/sort.go @@ -0,0 +1,480 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// -- Sorter -- + +// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html. +type Sorter interface { + Source() (interface{}, error) +} + +// -- SortInfo -- + +// SortInfo contains information about sorting a field. +type SortInfo struct { + Sorter + Field string + Ascending bool + Missing interface{} + IgnoreUnmapped *bool + SortMode string + NestedFilter Query + NestedPath string +} + +func (info SortInfo) Source() (interface{}, error) { + prop := make(map[string]interface{}) + if info.Ascending { + prop["order"] = "asc" + } else { + prop["order"] = "desc" + } + if info.Missing != nil { + prop["missing"] = info.Missing + } + if info.IgnoreUnmapped != nil { + prop["ignore_unmapped"] = *info.IgnoreUnmapped + } + if info.SortMode != "" { + prop["sort_mode"] = info.SortMode + } + if info.NestedFilter != nil { + prop["nested_filter"] = info.NestedFilter + } + if info.NestedPath != "" { + prop["nested_path"] = info.NestedPath + } + source := make(map[string]interface{}) + source[info.Field] = prop + return source, nil +} + +// -- ScoreSort -- + +// ScoreSort sorts by relevancy score. +type ScoreSort struct { + Sorter + ascending bool +} + +// NewScoreSort creates a new ScoreSort. +func NewScoreSort() ScoreSort { + return ScoreSort{ascending: false} // Descending by default! +} + +// Order defines whether sorting ascending (default) or descending. +func (s ScoreSort) Order(ascending bool) ScoreSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s ScoreSort) Asc() ScoreSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s ScoreSort) Desc() ScoreSort { + s.ascending = false + return s +} + +// Source returns the JSON-serializable data. +func (s ScoreSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_score"] = x + if s.ascending { + x["reverse"] = true + } + return source, nil +} + +// -- FieldSort -- + +// FieldSort sorts by a given field. +type FieldSort struct { + Sorter + fieldName string + ascending bool + missing interface{} + ignoreUnmapped *bool + unmappedType *string + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewFieldSort creates a new FieldSort. +func NewFieldSort(fieldName string) FieldSort { + return FieldSort{ + fieldName: fieldName, + ascending: true, + } +} + +// FieldName specifies the name of the field to be used for sorting. +func (s FieldSort) FieldName(fieldName string) FieldSort { + s.fieldName = fieldName + return s +} + +// Order defines whether sorting ascending (default) or descending. 
+func (s FieldSort) Order(ascending bool) FieldSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s FieldSort) Asc() FieldSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s FieldSort) Desc() FieldSort { + s.ascending = false + return s +} + +// Missing sets the value to be used when a field is missing in a document. +// You can also use "_last" or "_first" to sort missing last or first +// respectively. +func (s FieldSort) Missing(missing interface{}) FieldSort { + s.missing = missing + return s +} + +// IgnoreUnmapped specifies what happens if the field does not exist in +// the index. Set it to true to ignore, or set it to false to not ignore (default). +func (s FieldSort) IgnoreUnmapped(ignoreUnmapped bool) FieldSort { + s.ignoreUnmapped = &ignoreUnmapped + return s +} + +// UnmappedType sets the type to use when the current field is not mapped +// in an index. +func (s FieldSort) UnmappedType(typ string) FieldSort { + s.unmappedType = &typ + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min, max, sum, and avg. +func (s FieldSort) SortMode(sortMode string) FieldSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s FieldSort) NestedFilter(nestedFilter Query) FieldSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s FieldSort) NestedPath(nestedPath string) FieldSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s FieldSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source[s.fieldName] = x + if s.ascending { + x["order"] = "asc" + } else { + x["order"] = "desc" + } + if s.missing != nil { + x["missing"] = s.missing + } + if s.ignoreUnmapped != nil { + x["ignore_unmapped"] = *s.ignoreUnmapped + } + if s.unmappedType != nil { + x["unmapped_type"] = *s.unmappedType + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} + +// -- GeoDistanceSort -- + +// GeoDistanceSort allows for sorting by geographic distance. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. +type GeoDistanceSort struct { + Sorter + fieldName string + points []*GeoPoint + geohashes []string + geoDistance *string + unit string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewGeoDistanceSort creates a new sorter for geo distances. +func NewGeoDistanceSort(fieldName string) GeoDistanceSort { + return GeoDistanceSort{ + fieldName: fieldName, + points: make([]*GeoPoint, 0), + geohashes: make([]string, 0), + ascending: true, + } +} + +// FieldName specifies the name of the (geo) field to use for sorting. +func (s GeoDistanceSort) FieldName(fieldName string) GeoDistanceSort { + s.fieldName = fieldName + return s +} + +// Order defines whether sorting ascending (default) or descending. 
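+// Note that, unlike FieldSort, GeoDistanceSort.Source below encodes a
+// descending sort as "reverse":true rather than an explicit "order" key,
+// so Order(false) (or Desc()) adds "reverse":true to the generated JSON.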
+func (s GeoDistanceSort) Order(ascending bool) GeoDistanceSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s GeoDistanceSort) Asc() GeoDistanceSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s GeoDistanceSort) Desc() GeoDistanceSort { + s.ascending = false + return s +} + +// Point specifies a point to create the range distance aggregations from. +func (s GeoDistanceSort) Point(lat, lon float64) GeoDistanceSort { + s.points = append(s.points, GeoPointFromLatLon(lat, lon)) + return s +} + +// Points specifies the geo point(s) to create the range distance aggregations from. +func (s GeoDistanceSort) Points(points ...*GeoPoint) GeoDistanceSort { + s.points = append(s.points, points...) + return s +} + +// GeoHashes specifies the geo point to create the range distance aggregations from. +func (s GeoDistanceSort) GeoHashes(geohashes ...string) GeoDistanceSort { + s.geohashes = append(s.geohashes, geohashes...) + return s +} + +// GeoDistance represents how to compute the distance. +// It can be sloppy_arc (default), arc, or plane. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. +func (s GeoDistanceSort) GeoDistance(geoDistance string) GeoDistanceSort { + s.geoDistance = &geoDistance + return s +} + +// Unit specifies the distance unit to use. It defaults to km. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#distance-units +// for details. +func (s GeoDistanceSort) Unit(unit string) GeoDistanceSort { + s.unit = unit + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min, max, sum, and avg. +func (s GeoDistanceSort) SortMode(sortMode string) GeoDistanceSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s GeoDistanceSort) NestedFilter(nestedFilter Query) GeoDistanceSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s GeoDistanceSort) NestedPath(nestedPath string) GeoDistanceSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s GeoDistanceSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_geo_distance"] = x + + // Points + ptarr := make([]interface{}, 0) + for _, pt := range s.points { + ptarr = append(ptarr, pt.Source()) + } + for _, geohash := range s.geohashes { + ptarr = append(ptarr, geohash) + } + x[s.fieldName] = ptarr + + if s.unit != "" { + x["unit"] = s.unit + } + if s.geoDistance != nil { + x["distance_type"] = *s.geoDistance + } + + if !s.ascending { + x["reverse"] = true + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} + +// -- ScriptSort -- + +// ScriptSort sorts by a custom script. See +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting +// for details about scripting. 
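+//
+// A minimal usage sketch (the script body "doc['likes'].value" and the
+// descending order are illustrative, not required):
+//
+//   sorter := NewScriptSort(NewScript("doc['likes'].value"), "number").Desc()
+//   src, _ := sorter.Source()
+//   // src marshals to {"_script":{"reverse":true,"script":"doc['likes'].value","type":"number"}}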
+type ScriptSort struct { + Sorter + script *Script + typ string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewScriptSort creates and initializes a new ScriptSort. +// You must provide a script and a type, e.g. "string" or "number". +func NewScriptSort(script *Script, typ string) ScriptSort { + return ScriptSort{ + script: script, + typ: typ, + ascending: true, + } +} + +// Type sets the script type, which can be either "string" or "number". +func (s ScriptSort) Type(typ string) ScriptSort { + s.typ = typ + return s +} + +// Order defines whether sorting ascending (default) or descending. +func (s ScriptSort) Order(ascending bool) ScriptSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s ScriptSort) Asc() ScriptSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s ScriptSort) Desc() ScriptSort { + s.ascending = false + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min or max. +func (s ScriptSort) SortMode(sortMode string) ScriptSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s ScriptSort) NestedFilter(nestedFilter Query) ScriptSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s ScriptSort) NestedPath(nestedPath string) ScriptSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s ScriptSort) Source() (interface{}, error) { + if s.script == nil { + return nil, errors.New("ScriptSort expected a script") + } + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_script"] = x + + src, err := s.script.Source() + if err != nil { + return nil, err + } + x["script"] = src + + x["type"] = s.typ + + if !s.ascending { + x["reverse"] = true + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/sort_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/sort_test.go new file mode 100644 index 000000000..a0f9ddfc8 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/sort_test.go @@ -0,0 +1,214 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestSortInfo(t *testing.T) {
+	builder := SortInfo{Field: "grade", Ascending: false}
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"grade":{"order":"desc"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestScoreSort(t *testing.T) {
+	builder := NewScoreSort()
+	if builder.ascending != false {
+		t.Error("expected score sorter to be descending by default")
+	}
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_score":{}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestScoreSortOrderAscending(t *testing.T) {
+	builder := NewScoreSort().Asc()
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_score":{"reverse":true}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestScoreSortOrderDescending(t *testing.T) {
+	builder := NewScoreSort().Desc()
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_score":{}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFieldSort(t *testing.T) {
+	builder := NewFieldSort("grade")
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"grade":{"order":"asc"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFieldSortOrderDesc(t *testing.T) {
+	builder := NewFieldSort("grade").Desc()
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"grade":{"order":"desc"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFieldSortComplex(t *testing.T) {
+	builder := NewFieldSort("price").Desc().
+		SortMode("avg").
+		Missing("_last").
+		UnmappedType("product").
+		NestedFilter(NewTermQuery("product.color", "blue")).
+		NestedPath("variant")
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc","unmapped_type":"product"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestGeoDistanceSort(t *testing.T) {
+	builder := NewGeoDistanceSort("pin.location").
+		Point(-70, 40).
+		Order(true).
+		Unit("km").
+		SortMode("min").
+		GeoDistance("sloppy_arc")
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestGeoDistanceSortOrderDesc(t *testing.T) {
+	builder := NewGeoDistanceSort("pin.location").
+		Point(-70, 40).
+		Unit("km").
+		SortMode("min").
+		GeoDistance("sloppy_arc").
+		Desc()
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"reverse":true,"unit":"km"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestScriptSort(t *testing.T) {
+	builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Order(true)
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_script":{"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestScriptSortOrderDesc(t *testing.T) {
+	builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Desc()
+	src, err := builder.Source()
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := json.Marshal(src)
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"_script":{"reverse":true,"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest.go
new file mode 100644
index 000000000..1fb48ac0b
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest.go
@@ -0,0 +1,143 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// SuggestService returns suggestions for text.
+type SuggestService struct {
+	client     *Client
+	pretty     bool
+	routing    string
+	preference string
+	indices    []string
+	suggesters []Suggester
+}
+
+func NewSuggestService(client *Client) *SuggestService {
+	builder := &SuggestService{
+		client:     client,
+		indices:    make([]string, 0),
+		suggesters: make([]Suggester, 0),
+	}
+	return builder
+}
+
+func (s *SuggestService) Index(indices ...string) *SuggestService {
+	s.indices = append(s.indices, indices...)
+ return s +} + +func (s *SuggestService) Pretty(pretty bool) *SuggestService { + s.pretty = pretty + return s +} + +func (s *SuggestService) Routing(routing string) *SuggestService { + s.routing = routing + return s +} + +func (s *SuggestService) Preference(preference string) *SuggestService { + s.preference = preference + return s +} + +func (s *SuggestService) Suggester(suggester Suggester) *SuggestService { + s.suggesters = append(s.suggesters, suggester) + return s +} + +func (s *SuggestService) Do() (SuggestResult, error) { + // Build url + path := "/" + + // Indices part + indexPart := make([]string, 0) + for _, index := range s.indices { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": index, + }) + if err != nil { + return nil, err + } + indexPart = append(indexPart, index) + } + path += strings.Join(indexPart, ",") + + // Suggest + path += "/_suggest" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + + // Set body + body := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + body[s.Name()] = src + } + + // Get response + res, err := s.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // There is a _shard object that cannot be deserialized. + // So we use json.RawMessage instead. + var suggestions map[string]*json.RawMessage + if err := json.Unmarshal(res.Body, &suggestions); err != nil { + return nil, err + } + + ret := make(SuggestResult) + for name, result := range suggestions { + if name != "_shards" { + var s []Suggestion + if err := json.Unmarshal(*result, &s); err != nil { + return nil, err + } + ret[name] = s + } + } + + return ret, nil +} + +type SuggestResult map[string][]Suggestion + +type Suggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []suggestionOption `json:"options"` +} + +type suggestionOption struct { + Text string `json:"text"` + Score float64 `json:"score"` + Freq int `json:"freq"` + Payload interface{} `json:"payload"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_field.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_field.go new file mode 100644 index 000000000..4738d9910 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_field.go @@ -0,0 +1,100 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// SuggestField can be used by the caller to specify a suggest field +// at index time. For a detailed example, see e.g. +// http://www.elasticsearch.org/blog/you-complete-me/. +type SuggestField struct { + inputs []string + output *string + payload interface{} + weight int + contextQueries []SuggesterContextQuery +} + +func NewSuggestField() *SuggestField { + return &SuggestField{weight: -1} +} + +func (f *SuggestField) Input(input ...string) *SuggestField { + if f.inputs == nil { + f.inputs = make([]string, 0) + } + f.inputs = append(f.inputs, input...) 
+ return f +} + +func (f *SuggestField) Output(output string) *SuggestField { + f.output = &output + return f +} + +func (f *SuggestField) Payload(payload interface{}) *SuggestField { + f.payload = payload + return f +} + +func (f *SuggestField) Weight(weight int) *SuggestField { + f.weight = weight + return f +} + +func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField { + f.contextQueries = append(f.contextQueries, queries...) + return f +} + +// MarshalJSON encodes SuggestField into JSON. +func (f *SuggestField) MarshalJSON() ([]byte, error) { + source := make(map[string]interface{}) + + if f.inputs != nil { + switch len(f.inputs) { + case 1: + source["input"] = f.inputs[0] + default: + source["input"] = f.inputs + } + } + + if f.output != nil { + source["output"] = *f.output + } + + if f.payload != nil { + source["payload"] = f.payload + } + + if f.weight >= 0 { + source["weight"] = f.weight + } + + switch len(f.contextQueries) { + case 0: + case 1: + src, err := f.contextQueries[0].Source() + if err != nil { + return nil, err + } + source["context"] = src + default: + var ctxq []interface{} + for _, query := range f.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + source["context"] = ctxq + } + + return json.Marshal(source) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_field_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_field_test.go new file mode 100644 index 000000000..b01cf0af0 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_field_test.go @@ -0,0 +1,30 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggestField(t *testing.T) { + field := NewSuggestField(). + Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). + Output("Golang and Elasticsearch: An introduction."). + Weight(1). + ContextQuery( + NewSuggesterCategoryMapping("color").FieldName("color_field").DefaultValues("red", "green", "blue"), + NewSuggesterGeoMapping("location").Precision("5m").Neighbors(true).DefaultLocations(GeoPointFromLatLon(52.516275, 13.377704)), + ) + data, err := json.Marshal(field) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"context":[{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"}},{"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}}],"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"output":"Golang and Elasticsearch: An introduction.","weight":1}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_test.go new file mode 100644 index 000000000..50a4a0952 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggest_test.go @@ -0,0 +1,131 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + _ "net/http" + "testing" +) + +func TestSuggestService(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{ + User: "olivere", + Message: "Welcome to Golang and Elasticsearch.", + Tags: []string{"golang", "elasticsearch"}, + Location: "48.1333,11.5667", // lat,lon + Suggest: NewSuggestField(). + Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). + Output("Golang and Elasticsearch: An introduction."). + Weight(0), + } + tweet2 := tweet{ + User: "olivere", + Message: "Another unrelated topic.", + Tags: []string{"golang"}, + Location: "48.1189,11.4289", // lat,lon + Suggest: NewSuggestField(). + Input("Another unrelated topic.", "Golang topic."). + Output("About Golang."). + Weight(1), + } + tweet3 := tweet{ + User: "sandrae", + Message: "Cycling is fun.", + Tags: []string{"sports", "cycling"}, + Location: "47.7167,11.7167", // lat,lon + Suggest: NewSuggestField(). + Input("Cycling is fun."). + Output("Cycling is a fun sport."), + } + + // Add all documents + _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() + if err != nil { + t.Fatal(err) + } + + _, err = client.Flush().Index(testIndexName).Do() + if err != nil { + t.Fatal(err) + } + + // Test _suggest endpoint + termSuggesterName := "my-term-suggester" + termSuggester := NewTermSuggester(termSuggesterName).Text("Goolang").Field("message") + phraseSuggesterName := "my-phrase-suggester" + phraseSuggester := NewPhraseSuggester(phraseSuggesterName).Text("Goolang").Field("message") + completionSuggesterName := "my-completion-suggester" + completionSuggester := NewCompletionSuggester(completionSuggesterName).Text("Go").Field("suggest_field") + + result, err := client.Suggest(). + Index(testIndexName). + Suggester(termSuggester). + Suggester(phraseSuggester). + Suggester(completionSuggester). 
+ Do() + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Errorf("expected result != nil; got nil") + } + if len(result) != 3 { + t.Errorf("expected 3 suggester results; got %d", len(result)) + } + + termSuggestions, found := result[termSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", termSuggesterName) + } + if termSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", termSuggesterName) + } + if len(termSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(termSuggestions)) + } + + phraseSuggestions, found := result[phraseSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", phraseSuggesterName) + } + if phraseSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", phraseSuggesterName) + } + if len(phraseSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(phraseSuggestions)) + } + + completionSuggestions, found := result[completionSuggesterName] + if !found { + t.Errorf("expected to find Suggest[%s]; got false", completionSuggesterName) + } + if completionSuggestions == nil { + t.Errorf("expected Suggest[%s] != nil; got nil", completionSuggesterName) + } + if len(completionSuggestions) != 1 { + t.Errorf("expected 1 suggestion; got %d", len(completionSuggestions)) + } + if len(completionSuggestions[0].Options) != 2 { + t.Errorf("expected 2 suggestion options; got %d", len(completionSuggestions[0].Options)) + } + if completionSuggestions[0].Options[0].Text != "About Golang." { + t.Errorf("expected Suggest[%s][0].Options[0].Text == %q; got %q", completionSuggesterName, "About Golang.", completionSuggestions[0].Options[0].Text) + } + if completionSuggestions[0].Options[1].Text != "Golang and Elasticsearch: An introduction." { + t.Errorf("expected Suggest[%s][0].Options[1].Text == %q; got %q", completionSuggesterName, "Golang and Elasticsearch: An introduction.", completionSuggestions[0].Options[1].Text) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester.go new file mode 100644 index 000000000..c342b10d3 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester.go @@ -0,0 +1,15 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Represents the generic suggester interface. +// A suggester's only purpose is to return the +// source of the query as a JSON-serializable +// object. Returning a map[string]interface{} +// will do. +type Suggester interface { + Name() string + Source(includeName bool) (interface{}, error) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion.go new file mode 100644 index 000000000..e0f5a3861 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion.go @@ -0,0 +1,129 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CompletionSuggester is a fast suggester for e.g. type-ahead completion. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html +// for more details. 
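+//
+// A short sketch mirroring the tests in this package (suggester and
+// field names are examples only):
+//
+//    s := NewCompletionSuggester("song-suggest").Text("n").Field("suggest")
+//    src, err := s.Source(true)
+//    // => {"song-suggest":{"text":"n","completion":{"field":"suggest"}}}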
+type CompletionSuggester struct {
+	Suggester
+	name           string
+	text           string
+	field          string
+	analyzer       string
+	size           *int
+	shardSize      *int
+	contextQueries []SuggesterContextQuery
+}
+
+// Creates a new completion suggester.
+func NewCompletionSuggester(name string) *CompletionSuggester {
+	return &CompletionSuggester{
+		name:           name,
+		contextQueries: make([]SuggesterContextQuery, 0),
+	}
+}
+
+func (q *CompletionSuggester) Name() string {
+	return q.name
+}
+
+func (q *CompletionSuggester) Text(text string) *CompletionSuggester {
+	q.text = text
+	return q
+}
+
+func (q *CompletionSuggester) Field(field string) *CompletionSuggester {
+	q.field = field
+	return q
+}
+
+func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester {
+	q.analyzer = analyzer
+	return q
+}
+
+func (q *CompletionSuggester) Size(size int) *CompletionSuggester {
+	q.size = &size
+	return q
+}
+
+func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester {
+	q.shardSize = &shardSize
+	return q
+}
+
+func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester {
+	q.contextQueries = append(q.contextQueries, query)
+	return q
+}
+
+func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester {
+	q.contextQueries = append(q.contextQueries, queries...)
+	return q
+}
+
+// completionSuggesterRequest is necessary because the order in which
+// the JSON elements are routed to Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the completion element.
type completionSuggesterRequest struct {
+	Text       string      `json:"text"`
+	Completion interface{} `json:"completion"`
+}
+
+// Creates the source for the completion suggester.
+func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
+	cs := &completionSuggesterRequest{}
+
+	if q.text != "" {
+		cs.Text = q.text
+	}
+
+	suggester := make(map[string]interface{})
+	cs.Completion = suggester
+
+	if q.analyzer != "" {
+		suggester["analyzer"] = q.analyzer
+	}
+	if q.field != "" {
+		suggester["field"] = q.field
+	}
+	if q.size != nil {
+		suggester["size"] = *q.size
+	}
+	if q.shardSize != nil {
+		suggester["shard_size"] = *q.shardSize
+	}
+	switch len(q.contextQueries) {
+	case 0:
+	case 1:
+		src, err := q.contextQueries[0].Source()
+		if err != nil {
+			return nil, err
+		}
+		suggester["context"] = src
+	default:
+		ctxq := make([]interface{}, 0)
+		for _, query := range q.contextQueries {
+			src, err := query.Source()
+			if err != nil {
+				return nil, err
+			}
+			ctxq = append(ctxq, src)
+		}
+		suggester["context"] = ctxq
+	}
+
+	// TODO(oe) Add completion-suggester specific parameters here
+
+	if !includeName {
+		return cs, nil
+	}
+
+	source := make(map[string]interface{})
+	source[q.name] = cs
+	return source, nil
+}
diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go
new file mode 100644
index 000000000..1c4455a61
--- /dev/null
+++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy.go
@@ -0,0 +1,179 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyCompletionSuggester is a CompletionSuggester that allows fuzzy
+// completion.
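+// "Fuzzy" means that suggestions tolerate small typos in the input text;
+// the tolerance is tuned via Fuzziness, FuzzyMinLength, FuzzyPrefixLength
+// and related setters below.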
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html +// for details, and +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html#fuzzy +// for details about the fuzzy completion suggester. +type FuzzyCompletionSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + fuzziness interface{} + fuzzyTranspositions *bool + fuzzyMinLength *int + fuzzyPrefixLength *int + unicodeAware *bool +} + +// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester. +type Fuzziness struct { +} + +// Creates a new completion suggester. +func NewFuzzyCompletionSuggester(name string) *FuzzyCompletionSuggester { + return &FuzzyCompletionSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *FuzzyCompletionSuggester) Name() string { + return q.name +} + +func (q *FuzzyCompletionSuggester) Text(text string) *FuzzyCompletionSuggester { + q.text = text + return q +} + +func (q *FuzzyCompletionSuggester) Field(field string) *FuzzyCompletionSuggester { + q.field = field + return q +} + +func (q *FuzzyCompletionSuggester) Analyzer(analyzer string) *FuzzyCompletionSuggester { + q.analyzer = analyzer + return q +} + +func (q *FuzzyCompletionSuggester) Size(size int) *FuzzyCompletionSuggester { + q.size = &size + return q +} + +func (q *FuzzyCompletionSuggester) ShardSize(shardSize int) *FuzzyCompletionSuggester { + q.shardSize = &shardSize + return q +} + +func (q *FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +// Fuzziness defines the strategy used to describe what "fuzzy" actually +// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO". +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#fuzziness +// for a detailed description. +func (q *FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) *FuzzyCompletionSuggester { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) *FuzzyCompletionSuggester { + q.fuzzyTranspositions = &fuzzyTranspositions + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyMinLength(minLength int) *FuzzyCompletionSuggester { + q.fuzzyMinLength = &minLength + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) *FuzzyCompletionSuggester { + q.fuzzyPrefixLength = &prefixLength + return q +} + +func (q *FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggester { + q.unicodeAware = &unicodeAware + return q +} + +// Creates the source for the completion suggester. 
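+// When includeName is true the suggester name becomes the top-level JSON
+// key, e.g. (a sketch taken from the tests in this package):
+//
+//    src, err := NewFuzzyCompletionSuggester("song-suggest").
+//        Text("n").Field("suggest").Fuzziness(2).Source(true)
+//    // => {"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}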
+func (q *FuzzyCompletionSuggester) Source(includeName bool) (interface{}, error) { + cs := &completionSuggesterRequest{} + + if q.text != "" { + cs.Text = q.text + } + + suggester := make(map[string]interface{}) + cs.Completion = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, 0) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Fuzzy Completion Suggester fields + fuzzy := make(map[string]interface{}) + suggester["fuzzy"] = fuzzy + if q.fuzziness != nil { + fuzzy["fuzziness"] = q.fuzziness + } + if q.fuzzyTranspositions != nil { + fuzzy["transpositions"] = *q.fuzzyTranspositions + } + if q.fuzzyMinLength != nil { + fuzzy["min_length"] = *q.fuzzyMinLength + } + if q.fuzzyPrefixLength != nil { + fuzzy["prefix_length"] = *q.fuzzyPrefixLength + } + if q.unicodeAware != nil { + fuzzy["unicode_aware"] = *q.unicodeAware + } + + if !includeName { + return cs, nil + } + + source := make(map[string]interface{}) + source[q.name] = cs + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy_test.go new file mode 100644 index 000000000..29fcba55f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_fuzzy_test.go @@ -0,0 +1,50 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestFuzzyCompletionSuggesterSource(t *testing.T) { + s := NewFuzzyCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest"). + Fuzziness(2) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestFuzzyCompletionSuggesterWithStringFuzzinessSource(t *testing.T) { + s := NewFuzzyCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest"). 
+ Fuzziness("1..4") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":"1..4"}}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_test.go new file mode 100644 index 000000000..986d3da01 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_completion_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestCompletionSuggesterSource(t *testing.T) { + s := NewCompletionSuggester("song-suggest"). + Text("n"). + Field("suggest") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context.go new file mode 100644 index 000000000..0903f2171 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context.go @@ -0,0 +1,11 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SuggesterContextQuery is used to define context information within +// a suggestion request. +type SuggesterContextQuery interface { + Source() (interface{}, error) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_category.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_category.go new file mode 100644 index 000000000..4b8e43f88 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_category.go @@ -0,0 +1,99 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterCategoryMapping -- + +// SuggesterCategoryMapping provides a mapping for a category context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_mapping. +type SuggesterCategoryMapping struct { + name string + fieldName string + defaultValues []string +} + +// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping. +func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping { + return &SuggesterCategoryMapping{ + name: name, + defaultValues: make([]string, 0), + } +} + +func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping { + q.defaultValues = append(q.defaultValues, values...) 
+ return q +} + +func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterCategoryMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "category" + + switch len(q.defaultValues) { + case 0: + x["default"] = q.defaultValues + case 1: + x["default"] = q.defaultValues[0] + default: + x["default"] = q.defaultValues + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterCategoryQuery -- + +// SuggesterCategoryQuery provides querying a category context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_query. +type SuggesterCategoryQuery struct { + name string + values []string +} + +// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery. +func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery { + q := &SuggesterCategoryQuery{ + name: name, + values: make([]string, 0), + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery { + q.values = append(q.values, values...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterCategoryQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + switch len(q.values) { + case 0: + source[q.name] = q.values + case 1: + source[q.name] = q.values[0] + default: + source[q.name] = q.values + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_category_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_category_test.go new file mode 100644 index 000000000..7ca045801 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_category_test.go @@ -0,0 +1,97 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggesterCategoryMapping(t *testing.T) { + q := NewSuggesterCategoryMapping("color").DefaultValues("red") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":"red","type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryMappingWithTwoDefaultValues(t *testing.T) { + q := NewSuggesterCategoryMapping("color").DefaultValues("red", "orange") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":["red","orange"],"type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryMappingWithFieldName(t *testing.T) { + q := NewSuggesterCategoryMapping("color"). + DefaultValues("red", "orange"). 
+ FieldName("color_field") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":{"default":["red","orange"],"path":"color_field","type":"category"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryQuery(t *testing.T) { + q := NewSuggesterCategoryQuery("color", "red") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":"red"}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterCategoryQueryWithTwoValues(t *testing.T) { + q := NewSuggesterCategoryQuery("color", "red", "yellow") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"color":["red","yellow"]}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_geo.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_geo.go new file mode 100644 index 000000000..bde1a4067 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_geo.go @@ -0,0 +1,132 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterGeoMapping -- + +// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_mapping. +type SuggesterGeoMapping struct { + name string + defaultLocations []*GeoPoint + precision []string + neighbors *bool + fieldName string +} + +// NewSuggesterGeoMapping creates a new SuggesterGeoMapping. +func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping { + return &SuggesterGeoMapping{ + name: name, + defaultLocations: make([]*GeoPoint, 0), + precision: make([]string, 0), + } +} + +func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping { + q.defaultLocations = append(q.defaultLocations, locations...) + return q +} + +func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping { + q.precision = append(q.precision, precision...) + return q +} + +func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping { + q.neighbors = &neighbors + return q +} + +func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. 
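+// For a fully configured mapping the output resembles (per the tests in
+// this package):
+//
+//    {"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}}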
+func (q *SuggesterGeoMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "geo" + + if len(q.precision) > 0 { + x["precision"] = q.precision + } + if q.neighbors != nil { + x["neighbors"] = *q.neighbors + } + + switch len(q.defaultLocations) { + case 0: + case 1: + x["default"] = q.defaultLocations[0].Source() + default: + arr := make([]interface{}, 0) + for _, p := range q.defaultLocations { + arr = append(arr, p.Source()) + } + x["default"] = arr + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterGeoQuery -- + +// SuggesterGeoQuery provides querying a geolocation context in a suggester. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_query +type SuggesterGeoQuery struct { + name string + location *GeoPoint + precision []string +} + +// NewSuggesterGeoQuery creates a new SuggesterGeoQuery. +func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery { + return &SuggesterGeoQuery{ + name: name, + location: location, + precision: make([]string, 0), + } +} + +func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery { + q.precision = append(q.precision, precision...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterGeoQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if len(q.precision) == 0 { + if q.location != nil { + source[q.name] = q.location.Source() + } + } else { + x := make(map[string]interface{}) + source[q.name] = x + + if q.location != nil { + x["value"] = q.location.Source() + } + + switch len(q.precision) { + case 0: + case 1: + x["precision"] = q.precision[0] + default: + x["precision"] = q.precision + } + } + + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_geo_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_geo_test.go new file mode 100644 index 000000000..331276dab --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_context_geo_test.go @@ -0,0 +1,48 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestSuggesterGeoMapping(t *testing.T) { + q := NewSuggesterGeoMapping("location"). + Precision("1km", "5m"). + Neighbors(true). + FieldName("pin"). 
+ DefaultLocations(GeoPointFromLatLon(0.0, 0.0)) + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestSuggesterGeoQuery(t *testing.T) { + q := NewSuggesterGeoQuery("location", GeoPointFromLatLon(11.5, 62.71)).Precision("1km") + src, err := q.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"location":{"precision":"1km","value":{"lat":11.5,"lon":62.71}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_phrase.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_phrase.go new file mode 100644 index 000000000..60c48d88b --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_phrase.go @@ -0,0 +1,554 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// For more details, see +// http://www.elasticsearch.org/guide/reference/api/search/phrase-suggest/ +type PhraseSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to a phrase suggester + maxErrors *float64 + separator *string + realWordErrorLikelihood *float64 + confidence *float64 + generators map[string][]CandidateGenerator + gramSize *int + smoothingModel SmoothingModel + forceUnigrams *bool + tokenLimit *int + preTag, postTag *string + collateQuery *string + collateFilter *string + collatePreference *string + collateParams map[string]interface{} + collatePrune *bool +} + +// Creates a new phrase suggester. +func NewPhraseSuggester(name string) *PhraseSuggester { + return &PhraseSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + collateParams: make(map[string]interface{}), + } +} + +func (q *PhraseSuggester) Name() string { + return q.name +} + +func (q *PhraseSuggester) Text(text string) *PhraseSuggester { + q.text = text + return q +} + +func (q *PhraseSuggester) Field(field string) *PhraseSuggester { + q.field = field + return q +} + +func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester { + q.analyzer = analyzer + return q +} + +func (q *PhraseSuggester) Size(size int) *PhraseSuggester { + q.size = &size + return q +} + +func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester { + q.shardSize = &shardSize + return q +} + +func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, queries...) 
+	return q
+}
+
+func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester {
+	if gramSize >= 1 {
+		q.gramSize = &gramSize
+	}
+	return q
+}
+
+func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester {
+	q.maxErrors = &maxErrors
+	return q
+}
+
+func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester {
+	q.separator = &separator
+	return q
+}
+
+func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester {
+	q.realWordErrorLikelihood = &realWordErrorLikelihood
+	return q
+}
+
+func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester {
+	q.confidence = &confidence
+	return q
+}
+
+func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester {
+	if q.generators == nil {
+		q.generators = make(map[string][]CandidateGenerator)
+	}
+	typ := generator.Type()
+	if _, found := q.generators[typ]; !found {
+		q.generators[typ] = make([]CandidateGenerator, 0)
+	}
+	q.generators[typ] = append(q.generators[typ], generator)
+	return q
+}
+
+func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester {
+	for _, g := range generators {
+		q = q.CandidateGenerator(g)
+	}
+	return q
+}
+
+func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester {
+	q.generators = nil
+	return q
+}
+
+func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester {
+	q.forceUnigrams = &forceUnigrams
+	return q
+}
+
+func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester {
+	q.smoothingModel = smoothingModel
+	return q
+}
+
+func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester {
+	q.tokenLimit = &tokenLimit
+	return q
+}
+
+func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester {
+	q.preTag = &preTag
+	q.postTag = &postTag
+	return q
+}
+
+func (q *PhraseSuggester) CollateQuery(collateQuery string) *PhraseSuggester {
+	q.collateQuery = &collateQuery
+	return q
+}
+
+func (q *PhraseSuggester) CollateFilter(collateFilter string) *PhraseSuggester {
+	q.collateFilter = &collateFilter
+	return q
+}
+
+func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester {
+	q.collatePreference = &collatePreference
+	return q
+}
+
+func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester {
+	q.collateParams = collateParams
+	return q
+}
+
+func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester {
+	q.collatePrune = &collatePrune
+	return q
+}
+
+// phraseSuggesterRequest is necessary because the order in which
+// the JSON elements are routed to Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the phrase element.
+type phraseSuggesterRequest struct {
+	Text   string      `json:"text"`
+	Phrase interface{} `json:"phrase"`
+}
+
+// Creates the source for the phrase suggester.
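+// A compact sketch (names and values mirror the tests in this package):
+//
+//    s := NewPhraseSuggester("simple_phrase").Text("Xor the Got-Jewel").Field("bigram").GramSize(2)
+//    src, err := s.Source(true) // => {"simple_phrase":{"text":"...","phrase":{"field":"bigram","gram_size":2}}}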
+func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) {
+	ps := &phraseSuggesterRequest{}
+
+	if q.text != "" {
+		ps.Text = q.text
+	}
+
+	suggester := make(map[string]interface{})
+	ps.Phrase = suggester
+
+	if q.analyzer != "" {
+		suggester["analyzer"] = q.analyzer
+	}
+	if q.field != "" {
+		suggester["field"] = q.field
+	}
+	if q.size != nil {
+		suggester["size"] = *q.size
+	}
+	if q.shardSize != nil {
+		suggester["shard_size"] = *q.shardSize
+	}
+	switch len(q.contextQueries) {
+	case 0:
+	case 1:
+		src, err := q.contextQueries[0].Source()
+		if err != nil {
+			return nil, err
+		}
+		suggester["context"] = src
+	default:
+		ctxq := make([]interface{}, 0)
+		for _, query := range q.contextQueries {
+			src, err := query.Source()
+			if err != nil {
+				return nil, err
+			}
+			ctxq = append(ctxq, src)
+		}
+		suggester["context"] = ctxq
+	}
+
+	// Phrase-suggester specific parameters
+	if q.realWordErrorLikelihood != nil {
+		suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood
+	}
+	if q.confidence != nil {
+		suggester["confidence"] = *q.confidence
+	}
+	if q.separator != nil {
+		suggester["separator"] = *q.separator
+	}
+	if q.maxErrors != nil {
+		suggester["max_errors"] = *q.maxErrors
+	}
+	if q.gramSize != nil {
+		suggester["gram_size"] = *q.gramSize
+	}
+	if q.forceUnigrams != nil {
+		suggester["force_unigrams"] = *q.forceUnigrams
+	}
+	if q.tokenLimit != nil {
+		suggester["token_limit"] = *q.tokenLimit
+	}
+	if q.generators != nil && len(q.generators) > 0 {
+		for typ, generators := range q.generators {
+			arr := make([]interface{}, 0)
+			for _, g := range generators {
+				src, err := g.Source()
+				if err != nil {
+					return nil, err
+				}
+				arr = append(arr, src)
+			}
+			suggester[typ] = arr
+		}
+	}
+	if q.smoothingModel != nil {
+		src, err := q.smoothingModel.Source()
+		if err != nil {
+			return nil, err
+		}
+		x := make(map[string]interface{})
+		x[q.smoothingModel.Type()] = src
+		suggester["smoothing"] = x
+	}
+	if q.preTag != nil {
+		hl := make(map[string]string)
+		hl["pre_tag"] = *q.preTag
+		if q.postTag != nil {
+			hl["post_tag"] = *q.postTag
+		}
+		suggester["highlight"] = hl
+	}
+	if q.collateQuery != nil || q.collateFilter != nil {
+		collate := make(map[string]interface{})
+		suggester["collate"] = collate
+		if q.collateQuery != nil {
+			collate["query"] = *q.collateQuery
+		}
+		if q.collateFilter != nil {
+			collate["filter"] = *q.collateFilter
+		}
+		if q.collatePreference != nil {
+			collate["preference"] = *q.collatePreference
+		}
+		if len(q.collateParams) > 0 {
+			collate["params"] = q.collateParams
+		}
+		if q.collatePrune != nil {
+			collate["prune"] = *q.collatePrune
+		}
+	}
+
+	if !includeName {
+		return ps, nil
+	}
+
+	source := make(map[string]interface{})
+	source[q.name] = ps
+	return source, nil
+}
+
+// -- Smoothing models --
+
+type SmoothingModel interface {
+	Type() string
+	Source() (interface{}, error)
+}
+
+// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
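+//
+// Attached to a PhraseSuggester, a smoothing model serializes under its
+// Type() key, e.g. (a sketch):
+//
+//    s := NewPhraseSuggester("simple_phrase").SmoothingModel(NewStupidBackoffSmoothingModel(0.4))
+//    // => ..."smoothing":{"stupid_backoff":{"discount":0.4}}...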
+type StupidBackoffSmoothingModel struct { + discount float64 +} + +func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel { + return &StupidBackoffSmoothingModel{ + discount: discount, + } +} + +func (sm *StupidBackoffSmoothingModel) Type() string { + return "stupid_backoff" +} + +func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["discount"] = sm.discount + return source, nil +} + +// -- + +// LaplaceSmoothingModel implements a laplace smoothing model. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. +type LaplaceSmoothingModel struct { + alpha float64 +} + +func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel { + return &LaplaceSmoothingModel{ + alpha: alpha, + } +} + +func (sm *LaplaceSmoothingModel) Type() string { + return "laplace" +} + +func (sm *LaplaceSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["alpha"] = sm.alpha + return source, nil +} + +// -- + +// LinearInterpolationSmoothingModel implements a linear interpolation +// smoothing model. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. +type LinearInterpolationSmoothingModel struct { + trigramLamda float64 + bigramLambda float64 + unigramLambda float64 +} + +func NewLinearInterpolationSmoothingModel(trigramLamda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel { + return &LinearInterpolationSmoothingModel{ + trigramLamda: trigramLamda, + bigramLambda: bigramLambda, + unigramLambda: unigramLambda, + } +} + +func (sm *LinearInterpolationSmoothingModel) Type() string { + return "linear_interpolation" +} + +func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["trigram_lambda"] = sm.trigramLamda + source["bigram_lambda"] = sm.bigramLambda + source["unigram_lambda"] = sm.unigramLambda + return source, nil +} + +// -- CandidateGenerator -- + +type CandidateGenerator interface { + Type() string + Source() (interface{}, error) +} + +// DirectCandidateGenerator implements a direct candidate generator. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. 
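+//
+// A sketch mirroring TestPhraseSuggesterComplexSource in this package:
+//
+//    g := NewDirectCandidateGenerator("body").SuggestMode("always").MinWordLength(1)
+//    s := NewPhraseSuggester("simple_phrase").CandidateGenerators(g)
+//    // serialized as "direct_generator":[{"field":"body","min_word_length":1,"suggest_mode":"always"}]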
+type DirectCandidateGenerator struct { + field string + preFilter *string + postFilter *string + suggestMode *string + accuracy *float64 + size *int + sort *string + stringDistance *string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator { + return &DirectCandidateGenerator{ + field: field, + } +} + +func (g *DirectCandidateGenerator) Type() string { + return "direct_generator" +} + +func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator { + g.field = field + return g +} + +func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator { + g.preFilter = &preFilter + return g +} + +func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator { + g.postFilter = &postFilter + return g +} + +func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator { + g.suggestMode = &suggestMode + return g +} + +func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator { + g.accuracy = &accuracy + return g +} + +func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator { + g.size = &size + return g +} + +func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator { + g.sort = &sort + return g +} + +func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator { + g.stringDistance = &stringDistance + return g +} + +func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator { + g.maxEdits = &maxEdits + return g +} + +func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator { + g.maxInspections = &maxInspections + return g +} + +func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator { + g.maxTermFreq = &maxTermFreq + return g +} + +func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator { + g.prefixLength = &prefixLength + return g +} + +func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator { + g.minWordLength = &minWordLength + return g +} + +func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator { + g.minDocFreq = &minDocFreq + return g +} + +func (g *DirectCandidateGenerator) Source() (interface{}, error) { + source := make(map[string]interface{}) + if g.field != "" { + source["field"] = g.field + } + if g.suggestMode != nil { + source["suggest_mode"] = *g.suggestMode + } + if g.accuracy != nil { + source["accuracy"] = *g.accuracy + } + if g.size != nil { + source["size"] = *g.size + } + if g.sort != nil { + source["sort"] = *g.sort + } + if g.stringDistance != nil { + source["string_distance"] = *g.stringDistance + } + if g.maxEdits != nil { + source["max_edits"] = *g.maxEdits + } + if g.maxInspections != nil { + source["max_inspections"] = *g.maxInspections + } + if g.maxTermFreq != nil { + source["max_term_freq"] = *g.maxTermFreq + } + if g.prefixLength != nil { + source["prefix_length"] = *g.prefixLength + } + if g.minWordLength != nil { + source["min_word_length"] = *g.minWordLength + } + if g.minDocFreq != nil { + source["min_doc_freq"] = *g.minDocFreq + } + if g.preFilter != nil { + source["pre_filter"] = *g.preFilter + } + if g.postFilter != nil { + source["post_filter"] = *g.postFilter + } + return source, nil +} diff 
--git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_phrase_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_phrase_test.go new file mode 100644 index 000000000..1eb46ce44 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_phrase_test.go @@ -0,0 +1,169 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestPhraseSuggesterSource(t *testing.T) { + s := NewPhraseSuggester("name"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(1). + RealWordErrorLikelihood(0.95). + MaxErrors(0.5). + GramSize(2). + Highlight("", "") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseSuggesterSourceWithContextQuery(t *testing.T) { + geomapQ := NewSuggesterGeoMapping("location"). + Precision("1km", "5m"). + Neighbors(true). + FieldName("pin"). + DefaultLocations(GeoPointFromLatLon(0.0, 0.0)) + + s := NewPhraseSuggester("name"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(1). + RealWordErrorLikelihood(0.95). + MaxErrors(0.5). + GramSize(2). + Highlight("", ""). + ContextQuery(geomapQ) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","context":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseSuggesterComplexSource(t *testing.T) { + g1 := NewDirectCandidateGenerator("body"). + SuggestMode("always"). + MinWordLength(1) + + g2 := NewDirectCandidateGenerator("reverse"). + SuggestMode("always"). + MinWordLength(1). + PreFilter("reverse"). + PostFilter("reverse") + + s := NewPhraseSuggester("simple_phrase"). + Text("Xor the Got-Jewel"). + Analyzer("body"). + Field("bigram"). + Size(4). + RealWordErrorLikelihood(0.95). + Confidence(2.0). + GramSize(2). + CandidateGenerators(g1, g2). + CollateQuery(`"match":{"{{field_name}}" : "{{suggestion}}"}`). + CollateParams(map[string]interface{}{"field_name": "title"}). + CollatePreference("_primary"). 
+ CollatePrune(true) + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"simple_phrase":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","collate":{"params":{"field_name":"title"},"preference":"_primary","prune":true,"query":"\"match\":{\"{{field_name}}\" : \"{{suggestion}}\"}"},"confidence":2,"direct_generator":[{"field":"body","min_word_length":1,"suggest_mode":"always"},{"field":"reverse","min_word_length":1,"post_filter":"reverse","pre_filter":"reverse","suggest_mode":"always"}],"field":"bigram","gram_size":2,"real_word_error_likelihood":0.95,"size":4}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} + +func TestPhraseStupidBackoffSmoothingModel(t *testing.T) { + s := NewStupidBackoffSmoothingModel(0.42) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"discount":0.42}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "stupid_backoff" { + t.Errorf("expected %q, got: %q", "stupid_backoff", s.Type()) + } +} + +func TestPhraseLaplaceSmoothingModel(t *testing.T) { + s := NewLaplaceSmoothingModel(0.63) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"alpha":0.63}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "laplace" { + t.Errorf("expected %q, got: %q", "laplace", s.Type()) + } +} + +func TestLinearInterpolationSmoothingModel(t *testing.T) { + s := NewLinearInterpolationSmoothingModel(0.3, 0.2, 0.05) + src, err := s.Source() + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + // The source does NOT include the smoothing model type! + expected := `{"bigram_lambda":0.2,"trigram_lambda":0.3,"unigram_lambda":0.05}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } + if s.Type() != "linear_interpolation" { + t.Errorf("expected %q, got: %q", "linear_interpolation", s.Type()) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_term.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_term.go new file mode 100644 index 000000000..116af405a --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_term.go @@ -0,0 +1,233 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
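+
+// A minimal usage sketch for the term suggester defined in this file; it is
+// illustrative only, and the suggester name, input text, and field are
+// assumptions:
+//
+//   s := NewTermSuggester("my-suggestion").Text("sugest").Field("body")
+//   src, err := s.Source(true) // keyed by the suggester name, ready for json.Marshal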
+ +package elastic + +// For more details, see +// http://www.elasticsearch.org/guide/reference/api/search/term-suggest/ +type TermSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to term suggester + suggestMode string + accuracy *float64 + sort string + stringDistance string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +// Creates a new term suggester. +func NewTermSuggester(name string) *TermSuggester { + return &TermSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *TermSuggester) Name() string { + return q.name +} + +func (q *TermSuggester) Text(text string) *TermSuggester { + q.text = text + return q +} + +func (q *TermSuggester) Field(field string) *TermSuggester { + q.field = field + return q +} + +func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester { + q.analyzer = analyzer + return q +} + +func (q *TermSuggester) Size(size int) *TermSuggester { + q.size = &size + return q +} + +func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester { + q.shardSize = &shardSize + return q +} + +func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester { + q.suggestMode = suggestMode + return q +} + +func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester { + q.accuracy = &accuracy + return q +} + +func (q *TermSuggester) Sort(sort string) *TermSuggester { + q.sort = sort + return q +} + +func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester { + q.stringDistance = stringDistance + return q +} + +func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester { + q.maxEdits = &maxEdits + return q +} + +func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester { + q.maxInspections = &maxInspections + return q +} + +func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester { + q.maxTermFreq = &maxTermFreq + return q +} + +func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester { + q.prefixLength = &prefixLength + return q +} + +func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester { + q.minWordLength = &minWordLength + return q +} + +func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester { + q.minDocFreq = &minDocFreq + return q +} + +// termSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the term element. +type termSuggesterRequest struct { + Text string `json:"text"` + Term interface{} `json:"term"` +} + +// Creates the source for the term suggester. 
+func (q *TermSuggester) Source(includeName bool) (interface{}, error) { + // "suggest" : { + // "my-suggest-1" : { + // "text" : "the amsterdma meetpu", + // "term" : { + // "field" : "body" + // } + // }, + // "my-suggest-2" : { + // "text" : "the rottredam meetpu", + // "term" : { + // "field" : "title", + // } + // } + // } + ts := &termSuggesterRequest{} + if q.text != "" { + ts.Text = q.text + } + + suggester := make(map[string]interface{}) + ts.Term = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, 0) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Specific to term suggester + if q.suggestMode != "" { + suggester["suggest_mode"] = q.suggestMode + } + if q.accuracy != nil { + suggester["accuracy"] = *q.accuracy + } + if q.sort != "" { + suggester["sort"] = q.sort + } + if q.stringDistance != "" { + suggester["string_distance"] = q.stringDistance + } + if q.maxEdits != nil { + suggester["max_edits"] = *q.maxEdits + } + if q.maxInspections != nil { + suggester["max_inspections"] = *q.maxInspections + } + if q.maxTermFreq != nil { + suggester["max_term_freq"] = *q.maxTermFreq + } + if q.prefixLength != nil { + suggester["prefix_len"] = *q.prefixLength + } + if q.minWordLength != nil { + suggester["min_word_len"] = *q.minWordLength + } + if q.minDocFreq != nil { + suggester["min_doc_freq"] = *q.minDocFreq + } + + if !includeName { + return ts, nil + } + + source := make(map[string]interface{}) + source[q.name] = ts + return source, nil +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_term_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_term_test.go new file mode 100644 index 000000000..869049890 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/suggester_term_test.go @@ -0,0 +1,29 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "testing" +) + +func TestTermSuggesterSource(t *testing.T) { + s := NewTermSuggester("name"). + Text("n"). + Field("suggest") + src, err := s.Source(true) + if err != nil { + t.Fatal(err) + } + data, err := json.Marshal(src) + if err != nil { + t.Fatalf("marshaling to JSON failed: %v", err) + } + got := string(data) + expected := `{"name":{"text":"n","term":{"field":"suggest"}}}` + if got != expected { + t.Errorf("expected\n%s\n,got:\n%s", expected, got) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/termvectors.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/termvectors.go new file mode 100644 index 000000000..355108200 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/termvectors.go @@ -0,0 +1,458 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v3/uritemplates"
+)
+
+// TermvectorsService returns information and statistics on terms in the
+// fields of a particular document. The document could be stored in the
+// index or artificially provided by the user.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html
+// for documentation.
+type TermvectorsService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ dfs *bool
+ doc interface{}
+ fieldStatistics *bool
+ fields []string
+ filter *TermvectorsFilterSettings
+ perFieldAnalyzer map[string]string
+ offsets *bool
+ parent string
+ payloads *bool
+ positions *bool
+ preference string
+ realtime *bool
+ routing string
+ termStatistics *bool
+ version interface{}
+ versionType string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewTermvectorsService creates a new TermvectorsService.
+func NewTermvectorsService(client *Client) *TermvectorsService {
+ return &TermvectorsService{
+ client: client,
+ }
+}
+
+// Index in which the document resides.
+func (s *TermvectorsService) Index(index string) *TermvectorsService {
+ s.index = index
+ return s
+}
+
+// Type of the document.
+func (s *TermvectorsService) Type(typ string) *TermvectorsService {
+ s.typ = typ
+ return s
+}
+
+// Id of the document.
+func (s *TermvectorsService) Id(id string) *TermvectorsService {
+ s.id = id
+ return s
+}
+
+// Dfs specifies if distributed frequencies should be returned instead of
+// shard frequencies.
+func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService {
+ s.dfs = &dfs
+ return s
+}
+
+// Doc is the document to analyze.
+func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService {
+ s.doc = doc
+ return s
+}
+
+// FieldStatistics specifies if document count, sum of document frequencies
+// and sum of total term frequencies should be returned.
+func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService {
+ s.fieldStatistics = &fieldStatistics
+ return s
+}
+
+// Fields is a list of fields to return.
+func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService {
+ if s.fields == nil {
+ s.fields = make([]string, 0)
+ }
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// Filter adds terms filter settings.
+func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService {
+ s.filter = filter
+ return s
+}
+
+// PerFieldAnalyzer allows specifying a different analyzer than the one
+// at the field.
+func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService {
+ s.perFieldAnalyzer = perFieldAnalyzer
+ return s
+}
+
+// Offsets specifies if term offsets should be returned.
+func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService {
+ s.offsets = &offsets
+ return s
+}
+
+// Parent id of documents.
+func (s *TermvectorsService) Parent(parent string) *TermvectorsService {
+ s.parent = parent
+ return s
+}
+
+// Payloads specifies if term payloads should be returned.
+func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService {
+ s.payloads = &payloads
+ return s
+}
+
+// Positions specifies if term positions should be returned.
+func (s *TermvectorsService) Positions(positions bool) *TermvectorsService {
+ s.positions = &positions
+ return s
+}
+
+// Preference specifies the node or shard the operation
+// should be performed on (default: random).
+func (s *TermvectorsService) Preference(preference string) *TermvectorsService {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies if the request is real-time as opposed to
+// near-real-time (default: true).
+func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService {
+ s.realtime = &realtime
+ return s
+}
+
+// Routing is a specific routing value.
+func (s *TermvectorsService) Routing(routing string) *TermvectorsService {
+ s.routing = routing
+ return s
+}
+
+// TermStatistics specifies if total term frequency and document frequency
+// should be returned.
+func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService {
+ s.termStatistics = &termStatistics
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *TermvectorsService) Version(version interface{}) *TermvectorsService {
+ s.version = version
+ return s
+}
+
+// VersionType specifies a version type ("internal", "external", "external_gte", or "force").
+func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService {
+ s.versionType = versionType
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson defines the body parameters. See documentation.
+func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString defines the body parameters as a string. See documentation.
+func (s *TermvectorsService) BodyString(body string) *TermvectorsService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
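+// For example (mirroring the cases in termvectors_test.go), index "twitter"
+// and type "tweet" expand to "/twitter/tweet/_termvectors", and with id "1"
+// to "/twitter/tweet/1/_termvectors"; the optional flags are appended as
+// query string parameters.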
+func (s *TermvectorsService) buildURL() (string, url.Values, error) {
+ var pathParam = map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ }
+ var path string
+ var err error
+
+ // Build URL
+ if s.id != "" {
+ pathParam["id"] = s.id
+ path, err = uritemplates.Expand("/{index}/{type}/{id}/_termvectors", pathParam)
+ } else {
+ path, err = uritemplates.Expand("/{index}/{type}/_termvectors", pathParam)
+ }
+
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.dfs != nil {
+ params.Set("dfs", fmt.Sprintf("%v", *s.dfs))
+ }
+ if s.fieldStatistics != nil {
+ params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics))
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.offsets != nil {
+ params.Set("offsets", fmt.Sprintf("%v", *s.offsets))
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.payloads != nil {
+ params.Set("payloads", fmt.Sprintf("%v", *s.payloads))
+ }
+ if s.positions != nil {
+ params.Set("positions", fmt.Sprintf("%v", *s.positions))
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.realtime != nil {
+ params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.termStatistics != nil {
+ params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *TermvectorsService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *TermvectorsService) Do() (*TermvectorsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else if s.bodyString != "" {
+ body = s.bodyString
+ } else {
+ data := make(map[string]interface{})
+ if s.doc != nil {
+ data["doc"] = s.doc
+ }
+ if len(s.perFieldAnalyzer) > 0 {
+ data["per_field_analyzer"] = s.perFieldAnalyzer
+ }
+ if s.filter != nil {
+ src, err := s.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ data["filter"] = src
+ }
+ if len(data) > 0 {
+ body = data
+ }
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(TermvectorsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Filter settings --
+
+// TermvectorsFilterSettings adds additional filters to a Termvectors request.
+// It allows filtering terms based on their tf-idf scores.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html#_terms_filtering
+// for more information.
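+//
+// A small illustrative sketch (the threshold values are assumptions):
+//
+//   fs := NewTermvectorsFilterSettings().MinTermFreq(1).MinDocFreq(1).MaxNumTerms(25)
+//   svc = svc.Filter(fs) // attach to a TermvectorsService (svc is assumed)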
+type TermvectorsFilterSettings struct {
+ maxNumTerms *int64
+ minTermFreq *int64
+ maxTermFreq *int64
+ minDocFreq *int64
+ maxDocFreq *int64
+ minWordLength *int64
+ maxWordLength *int64
+}
+
+// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct.
+func NewTermvectorsFilterSettings() *TermvectorsFilterSettings {
+ return &TermvectorsFilterSettings{}
+}
+
+// MaxNumTerms specifies the maximum number of terms that must be returned per field.
+func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings {
+ fs.maxNumTerms = &value
+ return fs
+}
+
+// MinTermFreq ignores words with less than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings {
+ fs.minTermFreq = &value
+ return fs
+}
+
+// MaxTermFreq ignores words with more than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings {
+ fs.maxTermFreq = &value
+ return fs
+}
+
+// MinDocFreq ignores terms which do not occur in at least this many docs.
+func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings {
+ fs.minDocFreq = &value
+ return fs
+}
+
+// MaxDocFreq ignores terms which occur in more than this many docs.
+func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings {
+ fs.maxDocFreq = &value
+ return fs
+}
+
+// MinWordLength specifies the minimum word length below which words will be ignored.
+func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings {
+ fs.minWordLength = &value
+ return fs
+}
+
+// MaxWordLength specifies the maximum word length above which words will be ignored.
+func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings {
+ fs.maxWordLength = &value
+ return fs
+}
+
+// Source returns JSON for the query.
+func (fs *TermvectorsFilterSettings) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if fs.maxNumTerms != nil {
+ source["max_num_terms"] = *fs.maxNumTerms
+ }
+ if fs.minTermFreq != nil {
+ source["min_term_freq"] = *fs.minTermFreq
+ }
+ if fs.maxTermFreq != nil {
+ source["max_term_freq"] = *fs.maxTermFreq
+ }
+ if fs.minDocFreq != nil {
+ source["min_doc_freq"] = *fs.minDocFreq
+ }
+ if fs.maxDocFreq != nil {
+ source["max_doc_freq"] = *fs.maxDocFreq
+ }
+ if fs.minWordLength != nil {
+ source["min_word_length"] = *fs.minWordLength
+ }
+ if fs.maxWordLength != nil {
+ source["max_word_length"] = *fs.maxWordLength
+ }
+ return source, nil
+}
+
+// -- Response types --
+
+type TokenInfo struct {
+ StartOffset int64 `json:"start_offset"`
+ EndOffset int64 `json:"end_offset"`
+ Position int64 `json:"position"`
+ Payload string `json:"payload"`
+}
+
+type TermsInfo struct {
+ DocFreq int64 `json:"doc_freq"`
+ TermFreq int64 `json:"term_freq"`
+ Ttf int64 `json:"ttf"`
+ Tokens []TokenInfo `json:"tokens"`
+}
+
+type FieldStatistics struct {
+ DocCount int64 `json:"doc_count"`
+ SumDocFreq int64 `json:"sum_doc_freq"`
+ SumTtf int64 `json:"sum_ttf"`
+}
+
+type TermVectorsFieldInfo struct {
+ FieldStatistics FieldStatistics `json:"field_statistics"`
+ Terms map[string]TermsInfo `json:"terms"`
+}
+
+// TermvectorsResponse is the response of TermvectorsService.Do.
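+// TermVectors maps each requested field name to its terms and field
+// statistics; Took is reported in milliseconds.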
+type TermvectorsResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id,omitempty"` + Version int `json:"_version"` + Found bool `json:"found"` + Took int64 `json:"took"` + TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/termvectors_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/termvectors_test.go new file mode 100644 index 000000000..e487a24a4 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/termvectors_test.go @@ -0,0 +1,165 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "testing" + "time" +) + +func TestTermVectorsBuildURL(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tests := []struct { + Index string + Type string + Id string + Expected string + }{ + { + "twitter", + "tweet", + "", + "/twitter/tweet/_termvectors", + }, + { + "twitter", + "tweet", + "1", + "/twitter/tweet/1/_termvectors", + }, + } + + for _, test := range tests { + builder := client.TermVectors(test.Index, test.Type) + if test.Id != "" { + builder = builder.Id(test.Id) + } + path, _, err := builder.buildURL() + if err != nil { + t.Fatal(err) + } + if path != test.Expected { + t.Errorf("expected %q; got: %q", test.Expected, path) + } + } +} + +func TestTermVectorsWithId(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Refresh(true). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // TermVectors by specifying ID + field := "Message" + result, err := client.TermVectors(testIndexName, "tweet"). + Id("1"). + Fields(field). + FieldStatistics(true). + TermStatistics(true). + Do() + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } + if result.Took <= 0 { + t.Errorf("expected took in millis > 0; got: %v", result.Took) + } +} + +func TestTermVectorsWithDoc(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Travis lags sometimes + if isTravis() { + time.Sleep(2 * time.Second) + } + + // TermVectors by specifying Doc + var doc = map[string]interface{}{ + "fullname": "John Doe", + "text": "twitter test test test", + } + var perFieldAnalyzer = map[string]string{ + "fullname": "keyword", + } + + result, err := client.TermVectors(testIndexName, "tweet"). + Doc(doc). + PerFieldAnalyzer(perFieldAnalyzer). + FieldStatistics(true). + TermStatistics(true). 
+ Do() + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } + if result.Took <= 0 { + t.Errorf("expected took in millis > 0; got: %v", result.Took) + } +} + +func TestTermVectorsWithFilter(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + // Travis lags sometimes + if isTravis() { + time.Sleep(2 * time.Second) + } + + // TermVectors by specifying Doc + var doc = map[string]interface{}{ + "fullname": "John Doe", + "text": "twitter test test test", + } + var perFieldAnalyzer = map[string]string{ + "fullname": "keyword", + } + + result, err := client.TermVectors(testIndexName, "tweet"). + Doc(doc). + PerFieldAnalyzer(perFieldAnalyzer). + FieldStatistics(true). + TermStatistics(true). + Filter(NewTermvectorsFilterSettings().MinTermFreq(1)). + Do() + if err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("expected to return information and statistics") + } + if !result.Found { + t.Errorf("expected found to be %v; got: %v", true, result.Found) + } + if result.Took <= 0 { + t.Errorf("expected took in millis > 0; got: %v", result.Took) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/update.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/update.go new file mode 100644 index 000000000..a20149b1c --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/update.go @@ -0,0 +1,300 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "gopkg.in/olivere/elastic.v3/uritemplates" +) + +// UpdateService updates a document in Elasticsearch. +// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-update.html +// for details. +type UpdateService struct { + client *Client + index string + typ string + id string + routing string + parent string + script *Script + fields []string + version *int64 + versionType string + retryOnConflict *int + refresh *bool + replicationType string + consistencyLevel string + upsert interface{} + scriptedUpsert *bool + docAsUpsert *bool + detectNoop *bool + doc interface{} + timeout string + pretty bool +} + +// NewUpdateService creates the service to update documents in Elasticsearch. +func NewUpdateService(client *Client) *UpdateService { + builder := &UpdateService{ + client: client, + fields: make([]string, 0), + } + return builder +} + +// Index is the name of the Elasticsearch index (required). +func (b *UpdateService) Index(name string) *UpdateService { + b.index = name + return b +} + +// Type is the type of the document (required). +func (b *UpdateService) Type(typ string) *UpdateService { + b.typ = typ + return b +} + +// Id is the identifier of the document to update (required). +func (b *UpdateService) Id(id string) *UpdateService { + b.id = id + return b +} + +// Routing specifies a specific routing value. +func (b *UpdateService) Routing(routing string) *UpdateService { + b.routing = routing + return b +} + +// Parent sets the id of the parent document. +func (b *UpdateService) Parent(parent string) *UpdateService { + b.parent = parent + return b +} + +// Script is the script definition. 
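+// A sketch mirroring update_test.go (the script body, params, and language
+// are assumptions):
+//
+//   script := NewScript("ctx._source.retweets += num").
+//       Params(map[string]interface{}{"num": 1}).Lang("groovy")
+//   update := client.Update().Index("test").Type("type1").Id("1").Script(script)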
+func (b *UpdateService) Script(script *Script) *UpdateService {
+ b.script = script
+ return b
+}
+
+// RetryOnConflict specifies how many times the operation should be retried
+// when a conflict occurs (default: 0).
+func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService {
+ b.retryOnConflict = &retryOnConflict
+ return b
+}
+
+// Fields is a list of fields to return in the response.
+func (b *UpdateService) Fields(fields ...string) *UpdateService {
+ b.fields = make([]string, 0, len(fields))
+ b.fields = append(b.fields, fields...)
+ return b
+}
+
+// Version defines the explicit version number for concurrency control.
+func (b *UpdateService) Version(version int64) *UpdateService {
+ b.version = &version
+ return b
+}
+
+// VersionType is one of "internal" or "force".
+func (b *UpdateService) VersionType(versionType string) *UpdateService {
+ b.versionType = versionType
+ return b
+}
+
+// Refresh the index after performing the update.
+func (b *UpdateService) Refresh(refresh bool) *UpdateService {
+ b.refresh = &refresh
+ return b
+}
+
+// ReplicationType is one of "sync" or "async".
+func (b *UpdateService) ReplicationType(replicationType string) *UpdateService {
+ b.replicationType = replicationType
+ return b
+}
+
+// ConsistencyLevel is one of "one", "quorum", or "all".
+// It sets the write consistency setting for the update operation.
+func (b *UpdateService) ConsistencyLevel(consistencyLevel string) *UpdateService {
+ b.consistencyLevel = consistencyLevel
+ return b
+}
+
+// Doc allows for updating a partial document.
+func (b *UpdateService) Doc(doc interface{}) *UpdateService {
+ b.doc = doc
+ return b
+}
+
+// Upsert can be used to index the document when it doesn't exist yet.
+// Use this e.g. to initialize a document with a default value.
+func (b *UpdateService) Upsert(doc interface{}) *UpdateService {
+ b.upsert = doc
+ return b
+}
+
+// DocAsUpsert can be used to insert the document if it doesn't already exist.
+func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService {
+ b.docAsUpsert = &docAsUpsert
+ return b
+}
+
+// DetectNoop will instruct Elasticsearch to check if changes will occur
+// when updating via Doc. If there aren't any changes, the request will
+// turn into a no-op.
+func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService {
+ b.detectNoop = &detectNoop
+ return b
+}
+
+// ScriptedUpsert should be set to true if the referenced script
+// (defined in Script or ScriptId) should be called to perform an insert.
+// The default is false.
+func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService {
+ b.scriptedUpsert = &scriptedUpsert
+ return b
+}
+
+// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms".
+func (b *UpdateService) Timeout(timeout string) *UpdateService {
+ b.timeout = timeout
+ return b
+}
+
+// Pretty instructs Elasticsearch to return human readable, prettified JSON.
+func (b *UpdateService) Pretty(pretty bool) *UpdateService {
+ b.pretty = pretty
+ return b
+}
+
+// url returns the URL part of the document request.
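+// For example, Index("test"), Type("type1") and Id("1") yield the path
+// "/test/type1/1/_update" (as asserted in update_test.go); options such as
+// Refresh and Timeout are encoded as query string parameters.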
+func (b *UpdateService) url() (string, url.Values, error) { + // Build url + path := "/{index}/{type}/{id}/_update" + path, err := uritemplates.Expand(path, map[string]string{ + "index": b.index, + "type": b.typ, + "id": b.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Parameters + params := make(url.Values) + if b.pretty { + params.Set("pretty", "true") + } + if b.routing != "" { + params.Set("routing", b.routing) + } + if b.parent != "" { + params.Set("parent", b.parent) + } + if b.timeout != "" { + params.Set("timeout", b.timeout) + } + if b.refresh != nil { + params.Set("refresh", fmt.Sprintf("%v", *b.refresh)) + } + if b.replicationType != "" { + params.Set("replication", b.replicationType) + } + if b.consistencyLevel != "" { + params.Set("consistency", b.consistencyLevel) + } + if len(b.fields) > 0 { + params.Set("fields", strings.Join(b.fields, ",")) + } + if b.version != nil { + params.Set("version", fmt.Sprintf("%d", *b.version)) + } + if b.versionType != "" { + params.Set("version_type", b.versionType) + } + if b.retryOnConflict != nil { + params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict)) + } + + return path, params, nil +} + +// body returns the body part of the document request. +func (b *UpdateService) body() (interface{}, error) { + source := make(map[string]interface{}) + + if b.script != nil { + src, err := b.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + + if b.scriptedUpsert != nil { + source["scripted_upsert"] = *b.scriptedUpsert + } + + if b.upsert != nil { + source["upsert"] = b.upsert + } + + if b.doc != nil { + source["doc"] = b.doc + } + if b.docAsUpsert != nil { + source["doc_as_upsert"] = *b.docAsUpsert + } + if b.detectNoop != nil { + source["detect_noop"] = *b.detectNoop + } + + return source, nil +} + +// Do executes the update operation. +func (b *UpdateService) Do() (*UpdateResponse, error) { + path, params, err := b.url() + if err != nil { + return nil, err + } + + // Get body of the request + body, err := b.body() + if err != nil { + return nil, err + } + + // Get response + res, err := b.client.PerformRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(UpdateResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// UpdateResponse is the result of updating a document in Elasticsearch. +type UpdateResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` + GetResult *GetResult `json:"get"` +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/update_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/update_test.go new file mode 100644 index 000000000..57b26dc0e --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/update_test.go @@ -0,0 +1,312 @@ +// Copyright 2012-2015 Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "net/url" + "testing" +) + +func TestUpdateViaScript(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). 
+ Script(NewScript("ctx._source.tags += tag").Params(map[string]interface{}{"tag": "blue"}).Lang("groovy")) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"inline":"ctx._source.tags += tag","lang":"groovy","params":{"tag":"blue"}}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptId(t *testing.T) { + client := setupTestClient(t) + + scriptParams := map[string]interface{}{ + "pageViewEvent": map[string]interface{}{ + "url": "foo.com/bar", + "response": 404, + "time": "2014-01-01 12:32", + }, + } + script := NewScriptId("my_web_session_summariser").Params(scriptParams) + + update := client.Update(). + Index("sessions").Type("session").Id("dh3sgudg8gsrgl"). + Script(script). + ScriptedUpsert(true). + Upsert(map[string]interface{}{}) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"id":"my_web_session_summariser","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptFile(t *testing.T) { + client := setupTestClient(t) + + scriptParams := map[string]interface{}{ + "pageViewEvent": map[string]interface{}{ + "url": "foo.com/bar", + "response": 404, + "time": "2014-01-01 12:32", + }, + } + script := NewScriptFile("update_script").Params(scriptParams) + + update := client.Update(). + Index("sessions").Type("session").Id("dh3sgudg8gsrgl"). + Script(script). + ScriptedUpsert(true). 
+ Upsert(map[string]interface{}{}) + + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"file":"update_script","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptAndUpsert(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Script(NewScript("ctx._source.counter += count").Params(map[string]interface{}{"count": 4})). + Upsert(map[string]interface{}{"counter": 1}) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"script":{"inline":"ctx._source.counter += count","params":{"count":4}},"upsert":{"counter":1}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaDoc(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Doc(map[string]interface{}{"name": "new_name"}). + DetectNoop(true) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"detect_noop":true,"doc":{"name":"new_name"}}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaDocAndUpsert(t *testing.T) { + client := setupTestClient(t) + update := client.Update(). + Index("test").Type("type1").Id("1"). + Doc(map[string]interface{}{"name": "new_name"}). + DocAsUpsert(true). + Timeout("1s"). 
+ Refresh(true) + path, params, err := update.url() + if err != nil { + t.Fatalf("expected to return URL, got: %v", err) + } + expectedPath := `/test/type1/1/_update` + if expectedPath != path { + t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) + } + expectedParams := url.Values{"refresh": []string{"true"}, "timeout": []string{"1s"}} + if expectedParams.Encode() != params.Encode() { + t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) + } + body, err := update.body() + if err != nil { + t.Fatalf("expected to return body, got: %v", err) + } + data, err := json.Marshal(body) + if err != nil { + t.Fatalf("expected to marshal body as JSON, got: %v", err) + } + got := string(data) + expected := `{"doc":{"name":"new_name"},"doc_as_upsert":true}` + if got != expected { + t.Errorf("expected\n%s\ngot:\n%s", expected, got) + } +} + +func TestUpdateViaScriptIntegration(t *testing.T) { + client := setupTestClientAndCreateIndex(t) + + esversion, err := client.ElasticsearchVersion(DefaultURL) + if err != nil { + t.Fatal(err) + } + if esversion >= "1.4.3" || (esversion < "1.4.0" && esversion >= "1.3.8") { + t.Skip("groovy scripting has been disabled as for [1.3.8,1.4.0) and 1.4.3+") + return + } + + tweet1 := tweet{User: "olivere", Retweets: 10, Message: "Welcome to Golang and Elasticsearch."} + + // Add a document + indexResult, err := client.Index(). + Index(testIndexName). + Type("tweet"). + Id("1"). + BodyJson(&tweet1). + Do() + if err != nil { + t.Fatal(err) + } + if indexResult == nil { + t.Errorf("expected result to be != nil; got: %v", indexResult) + } + + // Update number of retweets + increment := 1 + script := NewScript("ctx._source.retweets += num"). + Params(map[string]interface{}{"num": increment}). + Lang("groovy") // Use "groovy" as default language as 1.3 uses MVEL by default + update, err := client.Update().Index(testIndexName).Type("tweet").Id("1"). + Script(script). + Do() + if err != nil { + t.Fatal(err) + } + if update == nil { + t.Errorf("expected update to be != nil; got %v", update) + } + if update.Version != indexResult.Version+1 { + t.Errorf("expected version to be %d; got %d", indexResult.Version+1, update.Version) + } + + // Get document + getResult, err := client.Get(). + Index(testIndexName). + Type("tweet"). + Id("1"). 
+ Do() + if err != nil { + t.Fatal(err) + } + if getResult.Index != testIndexName { + t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index) + } + if getResult.Type != "tweet" { + t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type) + } + if getResult.Id != "1" { + t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id) + } + if getResult.Source == nil { + t.Errorf("expected GetResult.Source to be != nil; got nil") + } + + // Decode the Source field + var tweetGot tweet + err = json.Unmarshal(*getResult.Source, &tweetGot) + if err != nil { + t.Fatal(err) + } + if tweetGot.Retweets != tweet1.Retweets+increment { + t.Errorf("expected Tweet.Retweets to be %d; got %d", tweet1.Retweets+increment, tweetGot.Retweets) + } +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/LICENSE b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/LICENSE new file mode 100644 index 000000000..de9c88cb6 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/uritemplates.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/uritemplates.go new file mode 100644 index 000000000..8a84813fe --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/uritemplates.go @@ -0,0 +1,359 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). 
+// +// To use uritemplates, parse a template string and expand it with a value +// map: +// +// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") +// values := make(map[string]interface{}) +// values["user"] = "jtacoma" +// values["repo"] = "uritemplates" +// expanded, _ := template.ExpandString(values) +// fmt.Printf(expanded) +// +package uritemplates + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) (escaped string) { + if allowReserved { + escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return escaped +} + +// A UriTemplate is a parsed representation of a URI template. +type UriTemplate struct { + raw string + parts []templatePart +} + +// Parse parses a URI template string into a UriTemplate object. +func Parse(rawtemplate string) (template *UriTemplate, err error) { + template = new(UriTemplate) + template.raw = rawtemplate + split := strings.Split(rawtemplate, "{") + template.parts = make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + err = errors.New("unexpected }") + break + } + template.parts[i].raw = s + } else { + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + err = errors.New("malformed template") + break + } + expression := subsplit[0] + template.parts[i*2-1], err = parseExpression(expression) + if err != nil { + break + } + template.parts[i*2].raw = subsplit[1] + } + } + if err != nil { + template = nil + } + return template, err +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" 
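+ // Form-style query expansion (RFC 6570): terms become name=value
+ // pairs joined by "&", and an empty value still renders as "name=".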
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. +func (self *UriTemplate) Expand(value interface{}) (string, error) { + values, ismap := value.(map[string]interface{}) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, self.allowReserved)) +} + +func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + 
self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/utils.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/utils.go new file mode 100644 index 000000000..399ef4623 --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/utils.go @@ -0,0 +1,13 @@ +package uritemplates + +func Expand(path string, expansions map[string]string) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + values := make(map[string]interface{}) + for k, v := range expansions { + values[k] = v + } + return template.Expand(values) +} diff --git a/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/utils_test.go b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/utils_test.go new file mode 100644 index 000000000..633949b6f --- /dev/null +++ b/services/templeton/vendor/src/gopkg.in/olivere/elastic.v3/uritemplates/utils_test.go @@ -0,0 +1,105 @@ +package uritemplates + +import ( + "testing" +) + +type ExpandTest struct { + in string + expansions map[string]string + want string +} + +var expandTests = []ExpandTest{ + // #0: no expansions + { + "http://www.golang.org/", + map[string]string{}, + "http://www.golang.org/", + }, + // #1: one expansion, no escaping + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red", + }, + "http://www.golang.org/red/delete", + }, + // #2: one expansion, with hex escapes + { + "http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red/blue", + }, + "http://www.golang.org/red%2Fblue/delete", + }, + // #3: one expansion, with space + { + 
"http://www.golang.org/{bucket}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org/red%20or%20blue/delete", + }, + // #4: expansion not found + { + "http://www.golang.org/{object}/delete", + map[string]string{ + "bucket": "red or blue", + }, + "http://www.golang.org//delete", + }, + // #5: multiple expansions + { + "http://www.golang.org/{one}/{two}/{three}/get", + map[string]string{ + "one": "ONE", + "two": "TWO", + "three": "THREE", + }, + "http://www.golang.org/ONE/TWO/THREE/get", + }, + // #6: utf-8 characters + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": "£100", + }, + "http://www.golang.org/%C2%A3100/get", + }, + // #7: punctuations + { + "http://www.golang.org/{bucket}/get", + map[string]string{ + "bucket": `/\@:,.*~`, + }, + "http://www.golang.org/%2F%5C%40%3A%2C.%2A~/get", + }, + // #8: mis-matched brackets + { + "http://www.golang.org/{bucket/get", + map[string]string{ + "bucket": "red", + }, + "", + }, + // #9: "+" prefix for suppressing escape + // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3 + { + "http://www.golang.org/{+topic}", + map[string]string{ + "topic": "/topics/myproject/mytopic", + }, + // The double slashes here look weird, but it's intentional + "http://www.golang.org//topics/myproject/mytopic", + }, +} + +func TestExpand(t *testing.T) { + for i, test := range expandTests { + got, _ := Expand(test.in, test.expansions) + if got != test.want { + t.Errorf("got %q expected %q in test %d", got, test.want, i) + } + } +} From 5db420e2a342399c12c7b1b76792f56bb870a7cd Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 16 Feb 2016 17:26:08 -0700 Subject: [PATCH 090/183] Switch to stable version of elastic via gopkg. Clean up vendored dependencies. --- .../templeton/elasticsearch/elasticsearch.go | 2 +- services/templeton/vendor/manifest | 18 - .../src/github.com/araddon/gou/LICENSE.md | 21 - .../src/github.com/araddon/gou/README.md | 129 - .../src/github.com/araddon/gou/coerce.go | 274 -- .../src/github.com/araddon/gou/coerce_test.go | 25 - .../github.com/araddon/gou/goutest/assert.go | 33 - .../vendor/src/github.com/araddon/gou/http.go | 205 -- .../src/github.com/araddon/gou/jsonhelper.go | 694 ---- .../github.com/araddon/gou/jsonhelper_test.go | 221 -- .../vendor/src/github.com/araddon/gou/log.go | 412 --- .../src/github.com/araddon/gou/log_unix.go | 29 - .../src/github.com/araddon/gou/log_windows.go | 8 - .../src/github.com/araddon/gou/testutil.go | 75 - .../src/github.com/araddon/gou/throttle.go | 56 - .../github.com/araddon/gou/throttle_test.go | 30 - .../vendor/src/github.com/araddon/gou/uid.go | 94 - .../src/github.com/araddon/gou/uid_test.go | 11 - .../src/github.com/bitly/go-hostpool/LICENSE | 21 - .../github.com/bitly/go-hostpool/README.md | 17 - .../bitly/go-hostpool/epsilon_greedy.go | 205 -- .../go-hostpool/epsilon_value_calculators.go | 40 - .../bitly/go-hostpool/example_test.go | 13 - .../bitly/go-hostpool/host_entry.go | 62 - .../github.com/bitly/go-hostpool/hostpool.go | 201 -- .../bitly/go-hostpool/hostpool_test.go | 145 - .../olivere/elastic/CHANGELOG-3.0.md | 363 -- .../olivere/elastic/CONTRIBUTING.md | 40 - .../github.com/olivere/elastic/CONTRIBUTORS | 35 - .../src/github.com/olivere/elastic/LICENSE | 20 - .../src/github.com/olivere/elastic/README.md | 415 --- .../olivere/elastic/backoff/LICENSE | 22 - .../olivere/elastic/backoff/backoff.go | 159 - .../olivere/elastic/backoff/backoff_test.go | 146 - .../olivere/elastic/backoff/retry.go | 53 - 
.../olivere/elastic/backoff/retry_test.go | 44 - .../src/github.com/olivere/elastic/bulk.go | 314 -- .../olivere/elastic/bulk_delete_request.go | 112 - .../elastic/bulk_delete_request_test.go | 42 - .../olivere/elastic/bulk_index_request.go | 173 - .../elastic/bulk_index_request_test.go | 63 - .../olivere/elastic/bulk_processor.go | 515 --- .../olivere/elastic/bulk_processor_test.go | 406 --- .../olivere/elastic/bulk_request.go | 17 - .../github.com/olivere/elastic/bulk_test.go | 463 --- .../olivere/elastic/bulk_update_request.go | 219 -- .../elastic/bulk_update_request_test.go | 77 - .../olivere/elastic/canonicalize.go | 28 - .../olivere/elastic/canonicalize_test.go | 41 - .../olivere/elastic/clear_scroll.go | 102 - .../olivere/elastic/clear_scroll_test.go | 85 - .../src/github.com/olivere/elastic/client.go | 1551 --------- .../github.com/olivere/elastic/client_test.go | 899 ----- .../olivere/elastic/cluster-test/Makefile | 16 - .../olivere/elastic/cluster-test/README.md | 63 - .../elastic/cluster-test/cluster-test.go | 356 -- .../olivere/elastic/cluster_health.go | 244 -- .../olivere/elastic/cluster_health_test.go | 109 - .../olivere/elastic/cluster_state.go | 284 -- .../olivere/elastic/cluster_state_test.go | 92 - .../olivere/elastic/cluster_stats.go | 349 -- .../olivere/elastic/cluster_stats_test.go | 85 - .../olivere/elastic/config/elasticsearch.yml | 103 - .../github.com/olivere/elastic/connection.go | 90 - .../src/github.com/olivere/elastic/count.go | 310 -- .../github.com/olivere/elastic/count_test.go | 124 - .../src/github.com/olivere/elastic/decoder.go | 26 - .../olivere/elastic/decoder_test.go | 49 - .../src/github.com/olivere/elastic/delete.go | 214 -- .../olivere/elastic/delete_by_query.go | 302 -- .../olivere/elastic/delete_by_query_test.go | 114 - .../olivere/elastic/delete_template.go | 118 - .../olivere/elastic/delete_template_test.go | 22 - .../github.com/olivere/elastic/delete_test.go | 118 - .../src/github.com/olivere/elastic/doc.go | 51 - .../src/github.com/olivere/elastic/errors.go | 141 - .../github.com/olivere/elastic/errors_test.go | 202 -- .../olivere/elastic/example_test.go | 547 --- .../src/github.com/olivere/elastic/exists.go | 175 - .../github.com/olivere/elastic/exists_test.go | 52 - .../src/github.com/olivere/elastic/explain.go | 330 -- .../olivere/elastic/explain_test.go | 41 - .../olivere/elastic/fetch_source_context.go | 74 - .../elastic/fetch_source_context_test.go | 125 - .../github.com/olivere/elastic/geo_point.go | 48 - .../olivere/elastic/geo_point_test.go | 24 - .../src/github.com/olivere/elastic/get.go | 271 -- .../olivere/elastic/get_template.go | 113 - .../olivere/elastic/get_template_test.go | 51 - .../github.com/olivere/elastic/get_test.go | 165 - .../github.com/olivere/elastic/highlight.go | 455 --- .../olivere/elastic/highlight_test.go | 192 -- .../src/github.com/olivere/elastic/index.go | 284 -- .../github.com/olivere/elastic/index_test.go | 279 -- .../olivere/elastic/indices_close.go | 153 - .../olivere/elastic/indices_close_test.go | 81 - .../olivere/elastic/indices_create.go | 129 - .../olivere/elastic/indices_create_test.go | 60 - .../olivere/elastic/indices_delete.go | 129 - .../elastic/indices_delete_template.go | 122 - .../olivere/elastic/indices_delete_test.go | 20 - .../olivere/elastic/indices_delete_warmer.go | 131 - .../elastic/indices_delete_warmer_test.go | 48 - .../olivere/elastic/indices_exists.go | 149 - .../elastic/indices_exists_template.go | 112 - .../elastic/indices_exists_template_test.go | 68 - 
.../olivere/elastic/indices_exists_test.go | 20 - .../olivere/elastic/indices_exists_type.go | 161 - .../elastic/indices_exists_type_test.go | 134 - .../olivere/elastic/indices_flush.go | 169 - .../olivere/elastic/indices_flush_test.go | 69 - .../olivere/elastic/indices_forcemerge.go | 200 -- .../elastic/indices_forcemerge_test.go | 56 - .../github.com/olivere/elastic/indices_get.go | 202 -- .../olivere/elastic/indices_get_aliases.go | 155 - .../elastic/indices_get_aliases_test.go | 146 - .../olivere/elastic/indices_get_mapping.go | 170 - .../elastic/indices_get_mapping_test.go | 50 - .../olivere/elastic/indices_get_settings.go | 183 - .../elastic/indices_get_settings_test.go | 81 - .../olivere/elastic/indices_get_template.go | 128 - .../elastic/indices_get_template_test.go | 41 - .../olivere/elastic/indices_get_test.go | 97 - .../olivere/elastic/indices_get_warmer.go | 194 -- .../elastic/indices_get_warmer_test.go | 83 - .../olivere/elastic/indices_open.go | 157 - .../olivere/elastic/indices_open_test.go | 20 - .../olivere/elastic/indices_put_alias.go | 111 - .../olivere/elastic/indices_put_alias_test.go | 123 - .../olivere/elastic/indices_put_mapping.go | 221 -- .../elastic/indices_put_mapping_test.go | 82 - .../olivere/elastic/indices_put_settings.go | 184 - .../elastic/indices_put_settings_test.go | 92 - .../olivere/elastic/indices_put_template.go | 179 - .../olivere/elastic/indices_put_warmer.go | 222 -- .../elastic/indices_put_warmer_test.go | 100 - .../olivere/elastic/indices_refresh.go | 94 - .../olivere/elastic/indices_refresh_test.go | 47 - .../olivere/elastic/indices_stats.go | 385 --- .../olivere/elastic/indices_stats_test.go | 85 - .../github.com/olivere/elastic/inner_hit.go | 160 - .../olivere/elastic/inner_hit_test.go | 44 - .../src/github.com/olivere/elastic/logger.go | 10 - .../src/github.com/olivere/elastic/mget.go | 219 -- .../github.com/olivere/elastic/mget_test.go | 95 - .../src/github.com/olivere/elastic/msearch.go | 96 - .../olivere/elastic/msearch_test.go | 197 -- .../github.com/olivere/elastic/nodes_info.go | 318 -- .../olivere/elastic/nodes_info_test.go | 40 - .../github.com/olivere/elastic/optimize.go | 130 - .../olivere/elastic/optimize_test.go | 47 - .../github.com/olivere/elastic/percolate.go | 309 -- .../olivere/elastic/percolate_test.go | 92 - .../src/github.com/olivere/elastic/ping.go | 126 - .../github.com/olivere/elastic/ping_test.go | 64 - .../src/github.com/olivere/elastic/plugins.go | 38 - .../olivere/elastic/plugins_test.go | 32 - .../src/github.com/olivere/elastic/query.go | 13 - .../github.com/olivere/elastic/reindexer.go | 270 -- .../olivere/elastic/reindexer_test.go | 285 -- .../src/github.com/olivere/elastic/request.go | 123 - .../src/github.com/olivere/elastic/rescore.go | 44 - .../github.com/olivere/elastic/rescorer.go | 64 - .../github.com/olivere/elastic/response.go | 43 - .../src/github.com/olivere/elastic/scan.go | 359 -- .../github.com/olivere/elastic/scan_test.go | 559 --- .../src/github.com/olivere/elastic/script.go | 131 - .../github.com/olivere/elastic/script_test.go | 78 - .../src/github.com/olivere/elastic/scroll.go | 208 -- .../github.com/olivere/elastic/scroll_test.go | 106 - .../src/github.com/olivere/elastic/search.go | 429 --- .../github.com/olivere/elastic/search_aggs.go | 1270 ------- .../elastic/search_aggs_bucket_children.go | 76 - .../search_aggs_bucket_children_test.go | 46 - .../search_aggs_bucket_date_histogram.go | 285 -- .../search_aggs_bucket_date_histogram_test.go | 49 - .../elastic/search_aggs_bucket_date_range.go | 
234 -- .../search_aggs_bucket_date_range_test.go | 130 - .../elastic/search_aggs_bucket_filter.go | 77 - .../elastic/search_aggs_bucket_filter_test.go | 66 - .../elastic/search_aggs_bucket_filters.go | 96 - .../search_aggs_bucket_filters_test.go | 68 - .../search_aggs_bucket_geo_distance.go | 194 -- .../search_aggs_bucket_geo_distance_test.go | 71 - .../elastic/search_aggs_bucket_global.go | 71 - .../elastic/search_aggs_bucket_global_test.go | 44 - .../elastic/search_aggs_bucket_histogram.go | 253 -- .../search_aggs_bucket_histogram_test.go | 61 - .../elastic/search_aggs_bucket_missing.go | 81 - .../search_aggs_bucket_missing_test.go | 44 - .../elastic/search_aggs_bucket_nested.go | 82 - .../elastic/search_aggs_bucket_nested_test.go | 62 - .../elastic/search_aggs_bucket_range.go | 232 -- .../elastic/search_aggs_bucket_range_test.go | 156 - .../elastic/search_aggs_bucket_sampler.go | 145 - .../search_aggs_bucket_sampler_test.go | 52 - .../search_aggs_bucket_significant_terms.go | 141 - ...arch_aggs_bucket_significant_terms_test.go | 86 - .../elastic/search_aggs_bucket_terms.go | 341 -- .../elastic/search_aggs_bucket_terms_test.go | 104 - .../elastic/search_aggs_metrics_avg.go | 101 - .../elastic/search_aggs_metrics_avg_test.go | 61 - .../search_aggs_metrics_cardinality.go | 120 - .../search_aggs_metrics_cardinality_test.go | 78 - .../search_aggs_metrics_extended_stats.go | 99 - ...search_aggs_metrics_extended_stats_test.go | 44 - .../elastic/search_aggs_metrics_geo_bounds.go | 105 - .../search_aggs_metrics_geo_bounds_test.go | 61 - .../elastic/search_aggs_metrics_max.go | 99 - .../elastic/search_aggs_metrics_max_test.go | 61 - .../elastic/search_aggs_metrics_min.go | 100 - .../elastic/search_aggs_metrics_min_test.go | 61 - .../search_aggs_metrics_percentile_ranks.go | 131 - ...arch_aggs_metrics_percentile_ranks_test.go | 78 - .../search_aggs_metrics_percentiles.go | 130 - .../search_aggs_metrics_percentiles_test.go | 78 - .../elastic/search_aggs_metrics_stats.go | 99 - .../elastic/search_aggs_metrics_stats_test.go | 61 - .../elastic/search_aggs_metrics_sum.go | 99 - .../elastic/search_aggs_metrics_sum_test.go | 61 - .../elastic/search_aggs_metrics_top_hits.go | 143 - .../search_aggs_metrics_top_hits_test.go | 31 - .../search_aggs_metrics_value_count.go | 102 - .../search_aggs_metrics_value_count_test.go | 63 - .../search_aggs_pipeline_avg_bucket.go | 113 - .../search_aggs_pipeline_avg_bucket_test.go | 27 - .../search_aggs_pipeline_bucket_script.go | 132 - ...search_aggs_pipeline_bucket_script_test.go | 30 - .../search_aggs_pipeline_bucket_selector.go | 134 - ...arch_aggs_pipeline_bucket_selector_test.go | 29 - .../search_aggs_pipeline_cumulative_sum.go | 90 - ...earch_aggs_pipeline_cumulative_sum_test.go | 27 - .../search_aggs_pipeline_derivative.go | 124 - .../search_aggs_pipeline_derivative_test.go | 27 - .../search_aggs_pipeline_max_bucket.go | 114 - .../search_aggs_pipeline_max_bucket_test.go | 27 - .../search_aggs_pipeline_min_bucket.go | 114 - .../search_aggs_pipeline_min_bucket_test.go | 27 - .../elastic/search_aggs_pipeline_mov_avg.go | 393 --- .../search_aggs_pipeline_mov_avg_test.go | 132 - .../search_aggs_pipeline_serial_diff.go | 124 - .../search_aggs_pipeline_serial_diff_test.go | 27 - .../search_aggs_pipeline_sum_bucket.go | 113 - .../search_aggs_pipeline_sum_bucket_test.go | 27 - .../elastic/search_aggs_pipeline_test.go | 1000 ------ .../olivere/elastic/search_aggs_test.go | 2996 ----------------- .../olivere/elastic/search_queries_bool.go | 212 -- 
.../elastic/search_queries_bool_test.go | 34 - .../elastic/search_queries_boosting.go | 97 - .../elastic/search_queries_boosting_test.go | 30 - .../elastic/search_queries_common_terms.go | 146 - .../search_queries_common_terms_test.go | 84 - .../elastic/search_queries_constant_score.go | 59 - .../search_queries_constant_score_test.go | 27 - .../olivere/elastic/search_queries_dis_max.go | 104 - .../elastic/search_queries_dis_max_test.go | 28 - .../olivere/elastic/search_queries_exists.go | 49 - .../elastic/search_queries_exists_test.go | 27 - .../olivere/elastic/search_queries_fsq.go | 172 - .../elastic/search_queries_fsq_score_funcs.go | 567 ---- .../elastic/search_queries_fsq_test.go | 166 - .../olivere/elastic/search_queries_fuzzy.go | 120 - .../elastic/search_queries_fuzzy_test.go | 27 - .../search_queries_geo_bounding_box.go | 121 - .../search_queries_geo_bounding_box_test.go | 63 - .../elastic/search_queries_geo_distance.go | 116 - .../search_queries_geo_distance_test.go | 70 - .../elastic/search_queries_geo_polygon.go | 72 - .../search_queries_geo_polygon_test.go | 58 - .../elastic/search_queries_has_child.go | 129 - .../elastic/search_queries_has_child_test.go | 45 - .../elastic/search_queries_has_parent.go | 97 - .../elastic/search_queries_has_parent_test.go | 27 - .../olivere/elastic/search_queries_ids.go | 76 - .../elastic/search_queries_ids_test.go | 27 - .../olivere/elastic/search_queries_indices.go | 89 - .../elastic/search_queries_indices_test.go | 46 - .../olivere/elastic/search_queries_match.go | 214 -- .../elastic/search_queries_match_all.go | 41 - .../elastic/search_queries_match_all_test.go | 44 - .../elastic/search_queries_match_test.go | 78 - .../olivere/elastic/search_queries_missing.go | 67 - .../elastic/search_queries_missing_test.go | 44 - .../elastic/search_queries_more_like_this.go | 412 --- .../search_queries_more_like_this_test.go | 91 - .../elastic/search_queries_multi_match.go | 275 -- .../search_queries_multi_match_test.go | 131 - .../olivere/elastic/search_queries_nested.go | 85 - .../elastic/search_queries_nested_test.go | 52 - .../olivere/elastic/search_queries_not.go | 45 - .../elastic/search_queries_not_test.go | 46 - .../olivere/elastic/search_queries_prefix.go | 67 - .../elastic/search_queries_prefix_test.go | 45 - .../elastic/search_queries_query_string.go | 349 -- .../search_queries_query_string_test.go | 28 - .../olivere/elastic/search_queries_range.go | 145 - .../elastic/search_queries_range_test.go | 68 - .../olivere/elastic/search_queries_regexp.go | 82 - .../elastic/search_queries_regexp_test.go | 47 - .../olivere/elastic/search_queries_script.go | 51 - .../elastic/search_queries_script_test.go | 45 - .../search_queries_simple_query_string.go | 185 - ...search_queries_simple_query_string_test.go | 86 - .../elastic/search_queries_template_query.go | 84 - .../search_queries_template_query_test.go | 65 - .../olivere/elastic/search_queries_term.go | 58 - .../elastic/search_queries_term_test.go | 46 - .../olivere/elastic/search_queries_terms.go | 58 - .../elastic/search_queries_terms_test.go | 46 - .../olivere/elastic/search_queries_type.go | 26 - .../elastic/search_queries_type_test.go | 27 - .../elastic/search_queries_wildcard.go | 81 - .../elastic/search_queries_wildcard_test.go | 67 - .../olivere/elastic/search_request.go | 153 - .../olivere/elastic/search_request_test.go | 48 - .../olivere/elastic/search_source.go | 511 --- .../olivere/elastic/search_source_test.go | 238 -- .../olivere/elastic/search_suggester_test.go | 259 -- 
.../olivere/elastic/search_template.go | 152 - .../olivere/elastic/search_templates_test.go | 98 - .../github.com/olivere/elastic/search_test.go | 885 ----- .../github.com/olivere/elastic/setup_test.go | 232 -- .../src/github.com/olivere/elastic/sort.go | 480 --- .../github.com/olivere/elastic/sort_test.go | 214 -- .../src/github.com/olivere/elastic/suggest.go | 143 - .../olivere/elastic/suggest_field.go | 100 - .../olivere/elastic/suggest_field_test.go | 30 - .../olivere/elastic/suggest_test.go | 131 - .../github.com/olivere/elastic/suggester.go | 15 - .../olivere/elastic/suggester_completion.go | 129 - .../elastic/suggester_completion_fuzzy.go | 179 - .../suggester_completion_fuzzy_test.go | 50 - .../elastic/suggester_completion_test.go | 29 - .../olivere/elastic/suggester_context.go | 11 - .../elastic/suggester_context_category.go | 99 - .../suggester_context_category_test.go | 97 - .../olivere/elastic/suggester_context_geo.go | 132 - .../elastic/suggester_context_geo_test.go | 48 - .../olivere/elastic/suggester_phrase.go | 554 --- .../olivere/elastic/suggester_phrase_test.go | 169 - .../olivere/elastic/suggester_term.go | 233 -- .../olivere/elastic/suggester_term_test.go | 29 - .../github.com/olivere/elastic/termvectors.go | 458 --- .../olivere/elastic/termvectors_test.go | 165 - .../src/github.com/olivere/elastic/update.go | 300 -- .../github.com/olivere/elastic/update_test.go | 312 -- .../olivere/elastic/uritemplates/LICENSE | 18 - .../elastic/uritemplates/uritemplates.go | 359 -- .../olivere/elastic/uritemplates/utils.go | 13 - .../elastic/uritemplates/utils_test.go | 105 - 350 files changed, 1 insertion(+), 52678 deletions(-) delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/LICENSE.md delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/README.md delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/coerce.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/coerce_test.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/goutest/assert.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/http.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/jsonhelper.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/jsonhelper_test.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/log.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/log_unix.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/log_windows.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/testutil.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/throttle.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/throttle_test.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/uid.go delete mode 100644 services/templeton/vendor/src/github.com/araddon/gou/uid_test.go delete mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/LICENSE delete mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/README.md delete mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_greedy.go delete mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_value_calculators.go delete mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/example_test.go delete mode 100644 
services/templeton/vendor/src/github.com/bitly/go-hostpool/host_entry.go delete mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool.go delete mode 100644 services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/CHANGELOG-3.0.md delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTING.md delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTORS delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/LICENSE delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/README.md delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/LICENSE delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_request.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/canonicalize.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/canonicalize_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/client.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/client_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/Makefile delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/README.md delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/cluster-test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_health.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_health_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_state.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_state_test.go delete mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/config/elasticsearch.yml delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/connection.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/count.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/count_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/decoder.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/decoder_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_template.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_template_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/delete_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/doc.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/errors.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/errors_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/example_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/exists.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/exists_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/explain.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/explain_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/geo_point.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/geo_point_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/get.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/get_template.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/get_template_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/get_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/highlight.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/highlight_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/index.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/index_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_close.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_close_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_create.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_create_test.go delete mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/indices_delete.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_template.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_flush.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_flush_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_open.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_open_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings_test.go delete mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/indices_put_template.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_stats.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/indices_stats_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/inner_hit.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/inner_hit_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/logger.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/mget.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/mget_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/msearch.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/msearch_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/nodes_info.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/nodes_info_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/optimize.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/optimize_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/percolate.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/percolate_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/ping.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/ping_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/plugins.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/plugins_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/query.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/reindexer.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/reindexer_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/request.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/rescore.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/rescorer.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/response.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/scan.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/scan_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/script.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/script_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/scroll.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/scroll_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs.go delete mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go delete mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go delete mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_score_funcs.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon.go delete mode 
100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script_test.go delete mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_request.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_request_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_source.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_source_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_suggester_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_template.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_templates_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/search_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/setup_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/sort.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/sort_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggest.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggest_field.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggest_field_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggest_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category_test.go delete mode 100644 
services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_term.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/suggester_term_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/termvectors.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/termvectors_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/update.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/update_test.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/LICENSE delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/uritemplates.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils.go delete mode 100644 services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils_test.go diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go index 511ddeeb6..ce56bd3b7 100644 --- a/services/templeton/src/templeton/elasticsearch/elasticsearch.go +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -3,7 +3,7 @@ package elasticsearch import ( "logcabin" - "github.com/olivere/elastic" + "gopkg.in/olivere/elastic.v3" "templeton/database" "templeton/model" diff --git a/services/templeton/vendor/manifest b/services/templeton/vendor/manifest index 52120fd92..13beb5830 100644 --- a/services/templeton/vendor/manifest +++ b/services/templeton/vendor/manifest @@ -1,18 +1,6 @@ { "version": 0, "dependencies": [ - { - "importpath": "github.com/araddon/gou", - "repository": "https://github.com/araddon/gou", - "revision": "1fd0868458fb611a8a956fae0ed0d0cc657cd321", - "branch": "master" - }, - { - "importpath": "github.com/bitly/go-hostpool", - "repository": "https://github.com/bitly/go-hostpool", - "revision": "d0e59c22a56e8dadfed24f74f452cea5a52722d2", - "branch": "master" - }, { "importpath": "github.com/lib/pq", "repository": "https://github.com/lib/pq", @@ -25,12 +13,6 @@ "revision": "e3edea7d68b76222b5118cc2e1cf3825e30abb80", "branch": "master" }, - { - "importpath": "github.com/olivere/elastic", - "repository": "https://github.com/olivere/elastic", - "revision": "a35245a5e2ecff49265ce16b1650cb8eccc3aea5", - "branch": "release-branch.v3" - }, { "importpath": "gopkg.in/olivere/elastic.v3", "repository": "https://gopkg.in/olivere/elastic.v3", diff --git a/services/templeton/vendor/src/github.com/araddon/gou/LICENSE.md b/services/templeton/vendor/src/github.com/araddon/gou/LICENSE.md deleted file mode 100644 index 628f430fb..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2012-2014 Aaron Raddon and contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/services/templeton/vendor/src/github.com/araddon/gou/README.md b/services/templeton/vendor/src/github.com/araddon/gou/README.md deleted file mode 100644 index 5c773adbb..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/README.md +++ /dev/null @@ -1,129 +0,0 @@ -gou - Go Utilities -=========================== - -Go Utilities (logging, json) - -JsonHelper -=============== - -A Go Json Helper, focused on Type coercion, and json path query. - -```go - package main - import . "github.com/araddon/gou" - import . "github.com/araddon/gou/goutest" - import "testing" - - - func TestJsonHelper() { - - var jsonData := []byte(`{ - "name":"aaron", - "nullstring":null, - "ints":[1,2,3,4], - "int":1, - "intstr":"1", - "int64":1234567890, - "MaxSize" : 1048576, - "strings":["string1"], - "stringscsv":"string1,string2", - "nested":{ - "nest":"string2", - "strings":["string1"], - "int":2, - "list":["value"], - "nest2":{ - "test":"good" - } - }, - "nested2":[ - {"sub":2} - ], - "period.name":"value" - }` - - jh := NewJsonHelper(jsonData) - - // String method - Assert(jh.String("name") == "aaron", t, "should get 'aaron' %s", jh.String("name")) - // Int Method - Assert(jh.Int("int") == 1, t, "get int ") - // Selecting items from an array - Assert(jh.Int("ints[0]") == 1, t, "get int from array %d", jh.Int("ints[0]")) - Assert(jh.Int("ints[2]") == 3, t, "get int from array %d", jh.Int("ints[0]")) - // Getting arrays - Assert(len(jh.Ints("ints")) == 4, t, "get int array %v", jh.Ints("ints")) - // Type coercion to Int64 - Assert(jh.Int64("int64") == 1234567890, t, "get int") - Assert(jh.Int("nested.int") == 2, t, "get int") - - // Path based selection - Assert(jh.String("nested.nest") == "string2", t, "should get string %s", jh.String("nested.nest")) - Assert(jh.String("nested.nest2.test") == "good", t, "should get string %s", jh.String("nested.nest2.test")) - Assert(jh.String("nested.list[0]") == "value", t, "get string from array") - Assert(jh.Int("nested2[0].sub") == 2, t, "get int from obj in array %d", jh.Int("nested2[0].sub")) - - // casing? - Assert(jh.Int("MaxSize") == 1048576, t, "get int, test capitalization? 
") - sl := jh.Strings("strings") - Assert(len(sl) == 1 && sl[0] == "string1", t, "get strings ") - sl = jh.Strings("stringscsv") - Assert(len(sl) == 2 && sl[0] == "string1", t, "get strings ") - - // Safe gets - i64, ok := jh.Int64Safe("int64") - Assert(ok, t, "int64safe ok") - Assert(i64 == 1234567890, t, "int64safe value") - - i, ok := jh.IntSafe("int") - Assert(ok, t, "intsafe ok") - Assert(i == 1, t, "intsafe value") - - l := jh.List("nested2") - Assert(len(l) == 1, t, "get list") - - jhm := jh.Helpers("nested2") - Assert(len(jhm) == 1, t, "get list of helpers") - Assert(jhm[0].Int("sub") == 2, t, "Should get list of helpers") - - // Now lets test xpath type syntax - Assert(jh.Int("/MaxSize") == 1048576, t, "get int, test capitalization? ") - Assert(jh.String("/nested/nest") == "string2", t, "should get string %s", jh.String("/nested/nest")) - Assert(jh.String("/nested/list[0]") == "value", t, "get string from array") - // note this one has period in name - Assert(jh.String("/period.name") == "value", t, "test period in name ") - } - -``` - - -Logging -=============== - -Yet Another Go Logger, configureable logging. - -```go - package main - import "github.com/araddon/gou" - import "flag" - - var logLevel *string = flag.String("logging", "debug", "Which log level: [debug,info,warn,error,fatal]") - - func main() { - - flag.Parse() - gou.SetupLogging(*logLevel) - - // logging methods - gou.Debug("hello", thing, " more ", stuff) - - gou.Error("hello") - - gou.Errorf("hello %v", thing) - } - -``` - -License -=============== -MIT License diff --git a/services/templeton/vendor/src/github.com/araddon/gou/coerce.go b/services/templeton/vendor/src/github.com/araddon/gou/coerce.go deleted file mode 100644 index 8b461b456..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/coerce.go +++ /dev/null @@ -1,274 +0,0 @@ -package gou - -import ( - "encoding/json" - "fmt" - "math" - "strconv" -) - -// Coerce types (string,int,int64, float, []byte) into String type -func CoerceString(v interface{}) (string, error) { - switch val := v.(type) { - case string: - if val == "null" || val == "NULL" { - return "", nil - } - return val, nil - case int: - return strconv.Itoa(val), nil - case int32: - return strconv.FormatInt(int64(val), 10), nil - case int64: - return strconv.FormatInt(val, 10), nil - case uint32: - return strconv.FormatUint(uint64(val), 10), nil - case uint64: - return strconv.FormatUint(val, 10), nil - case float32: - return strconv.FormatFloat(float64(val), 'f', -1, 32), nil - case float64: - return strconv.FormatFloat(val, 'f', -1, 64), nil - case []byte: - if string(val) == "null" || string(val) == "NULL" { - return "", nil - } - return string(val), nil - case json.RawMessage: - if string(val) == "null" || string(val) == "NULL" { - return "", nil - } - return string(val), nil - } - return "", fmt.Errorf("Could not coerce to string: %v", v) -} - -// Coerce type to string, returning zero length string if error or nil -func CoerceStringShort(v interface{}) string { - val, _ := CoerceString(v) - return val -} - -func CoerceFloat(v interface{}) (float64, error) { - switch val := v.(type) { - case int: - return float64(val), nil - case int32: - return float64(val), nil - case int64: - return float64(val), nil - case uint32: - return float64(val), nil - case uint64: - return float64(val), nil - case float64: - return val, nil - case string: - if len(val) > 0 { - if iv, err := strconv.ParseFloat(val, 64); err == nil { - return iv, nil - } - } - case []byte: - if len(val) > 0 { - if 
iv, err := strconv.ParseFloat(string(val), 64); err == nil { - return iv, nil - } - } - case json.RawMessage: - if len(val) > 0 { - if iv, err := strconv.ParseFloat(string(val), 64); err == nil { - return iv, nil - } - } - case nil: - return math.NaN(), nil - } - return 0, fmt.Errorf("Could not Coerce Value: %v", v) -} -func CoerceFloatShort(v interface{}) float64 { - val, _ := CoerceFloat(v) - return val -} - -func CoerceInt64(v interface{}) (int64, error) { - val, ok := valToInt64(v) - if ok { - return val, nil - } - return 0, fmt.Errorf("Could not coerce to int64: %v", v) -} -func CoerceInt64Short(v interface{}) int64 { - val, ok := valToInt64(v) - if ok { - return val - } - return 0 -} - -func CoerceInt(v interface{}) (int, error) { - val, ok := valToInt(v) - if ok { - return val, nil - } - return 0, fmt.Errorf("Could not coerce to int64: %v", v) -} -func CoerceIntShort(v interface{}) int { - val, ok := valToInt(v) - if ok { - return val - } - return 0 -} - -// Coerce a val(interface{}) into a Uint64 -func CoerceUint(v interface{}) (uint64, error) { - u64, ok := valToUint64(v) - if !ok { - return 0, fmt.Errorf("Could not Coerce %v", v) - } - return u64, nil -} - -// Coerce a Val(interface{}) into Uint64 -func CoerceUintShort(v interface{}) uint64 { - val, _ := CoerceUint(v) - return val -} - -// Given any numeric type (float*, int*, uint*, string) return an int. Returns false if it would -// overflow or if the the argument is not numeric. -func valToInt(i interface{}) (int, bool) { - i64, ok := valToInt64(i) - if !ok { - return -1, false - } - if i64 > MaxInt || i64 < MinInt { - return -1, false - } - return int(i64), true -} - -// Given any simple type (float*, int*, uint*, string, []byte, json.RawMessage) return an int64. -// Returns false if it would overflow or if the the argument is not numeric. -func valToInt64(i interface{}) (int64, bool) { - switch x := i.(type) { - case float32: - return int64(x), true - case float64: - return int64(x), true - case uint8: - return int64(x), true - case uint16: - return int64(x), true - case uint32: - return int64(x), true - case uint64: - if x > math.MaxInt64 { - return 0, false - } - return int64(x), true - case int8: - return int64(x), true - case int16: - return int64(x), true - case int32: - return int64(x), true - case int64: - return int64(x), true - case int: - return int64(x), true - case uint: - if uint64(x) > math.MaxInt64 { - return 0, false - } - return int64(x), true - case string: - if len(x) > 0 { - if iv, err := strconv.ParseInt(x, 10, 64); err == nil { - return iv, true - } - if iv, err := strconv.ParseFloat(x, 64); err == nil { - return valToInt64(iv) - } - } - case []byte: - if len(x) > 0 { - if iv, err := strconv.ParseInt(string(x), 10, 64); err == nil { - return iv, true - } - if iv, err := strconv.ParseFloat(string(x), 64); err == nil { - return valToInt64(iv) - } - } - case json.RawMessage: - if len(x) > 0 { - if iv, err := strconv.ParseInt(string(x), 10, 64); err == nil { - return iv, true - } - if iv, err := strconv.ParseFloat(string(x), 64); err == nil { - return valToInt64(iv) - } - } - } - return 0, false -} - -// Given any simple type (float*, int*, uint*, string, []byte, json.RawMessage) return an int64. -// Returns false if it would overflow or if the the argument is not numeric. 
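For reference, a minimal sketch of how the `Coerce*` helpers above are typically called. It assumes the upstream `github.com/araddon/gou` import path recorded in the vendor manifest, and the expected outputs in the comments follow from the case analysis shown in this file rather than from anything guaranteed by this patch:

```go
package main

import (
	"fmt"

	"github.com/araddon/gou"
)

func main() {
	// CoerceString accepts any simple type and reports an error when the
	// value cannot be represented as a string.
	s, err := gou.CoerceString(1234567890)
	fmt.Println(s, err) // expected: 1234567890 <nil>

	// The *Short variants swallow the error and return the zero value instead.
	fmt.Println(gou.CoerceIntShort("42"))     // expected: 42
	fmt.Println(gou.CoerceIntShort("oops"))   // expected: 0
	fmt.Println(gou.CoerceFloatShort("22.2")) // expected: 22.2
}
```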
-func valToUint64(i interface{}) (uint64, bool) { - switch x := i.(type) { - case float32: - return uint64(x), true - case float64: - return uint64(x), true - case uint8: - return uint64(x), true - case uint16: - return uint64(x), true - case uint32: - return uint64(x), true - case uint64: - return x, true - case int8: - return uint64(x), true - case int16: - return uint64(x), true - case int32: - return uint64(x), true - case int64: - return uint64(x), true - case int: - return uint64(x), true - case uint: - return uint64(x), true - case string: - if len(x) > 0 { - if uiv, err := strconv.ParseUint(x, 10, 64); err == nil { - return uiv, true - } - if fv, err := strconv.ParseFloat(x, 64); err == nil { - return uint64(fv), true - } - } - case []byte: - if len(x) > 0 { - if uiv, err := strconv.ParseUint(string(x), 10, 64); err == nil { - return uiv, true - } - if fv, err := strconv.ParseFloat(string(x), 64); err == nil { - return uint64(fv), true - } - } - case json.RawMessage: - if len(x) > 0 { - if uiv, err := strconv.ParseUint(string(x), 10, 64); err == nil { - return uiv, true - } - if fv, err := strconv.ParseFloat(string(x), 64); err == nil { - return uint64(fv), true - } - } - } - return 0, false -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/coerce_test.go b/services/templeton/vendor/src/github.com/araddon/gou/coerce_test.go deleted file mode 100644 index 58922fbbc..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/coerce_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package gou - -import ( - . "github.com/araddon/gou/goutest" - "testing" -) - -func TestCoerce(t *testing.T) { - - data := map[string]interface{}{ - "int": 4, - "float": 45.3, - "string": "22", - "stringf": "22.2", - } - Assert(CoerceStringShort(data["int"]) == "4", t, "get int as string") - Assert(CoerceStringShort(data["float"]) == "45.3", t, "get float as string: %v", data["float"]) - Assert(CoerceStringShort(data["string"]) == "22", t, "get string as string: %v", data["string"]) - Assert(CoerceStringShort(data["stringf"]) == "22.2", t, "get stringf as string: %v", data["stringf"]) - - Assert(CoerceIntShort(data["int"]) == 4, t, "get int as int: %v", data["int"]) - Assert(CoerceIntShort(data["float"]) == 45, t, "get float as int: %v", data["float"]) - Assert(CoerceIntShort(data["string"]) == 22, t, "get string as int: %v", data["string"]) - Assert(CoerceIntShort(data["stringf"]) == 22, t, "get stringf as int: %v", data["stringf"]) -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/goutest/assert.go b/services/templeton/vendor/src/github.com/araddon/gou/goutest/assert.go deleted file mode 100644 index d74759555..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/goutest/assert.go +++ /dev/null @@ -1,33 +0,0 @@ -package goutest - -import ( - "fmt" - "testing" -) - -// dumb simple assert for testing, printing -// Assert(len(items) == 9, t, "Should be 9 but was %d", len(items)) -func Assert(is bool, t *testing.T, args ...interface{}) { - if is == false { - msg := "" - if len(args) > 1 { - switch val := args[0].(type) { - case string: - msg = fmt.Sprintf(val, args[1:len(args)-1]) - default: - msg = fmt.Sprint(args) - } - - } else if len(args) == 1 { - switch val := args[0].(type) { - case string: - msg = val - default: - msg = fmt.Sprint(val) - } - } - - //gou.DoLog(3, gou.ERROR, msg) - t.Fatal(msg) - } -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/http.go b/services/templeton/vendor/src/github.com/araddon/gou/http.go deleted file 
mode 100644 index d1dd85a95..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/http.go +++ /dev/null @@ -1,205 +0,0 @@ -package gou - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "net/url" -) - -// Simple Fetch Wrapper, given a url it returns bytes -func Fetch(url string) (ret []byte, err error) { - resp, err := http.Get(url) - defer func() { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - }() - if err != nil { - Log(WARN, err.Error()) - return - } - ret, err = ioutil.ReadAll(resp.Body) - if err != nil { - return - } - return -} - -// Simple Fetch Wrapper, given a url it returns bytes and response -func FetchResp(url string) (ret []byte, err error, resp *http.Response) { - resp, err = http.Get(url) - defer func() { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - }() - if err != nil { - Log(WARN, err.Error()) - } - if resp == nil || resp.Body == nil { - return - } - ret, err = ioutil.ReadAll(resp.Body) - return -} - -// Simple Fetch Wrapper, given a url it returns Helper, error -// Sends as type application/json, interprets whatever datatype is sent in appropriately -func JsonHelperHttp(method, urlStr string, data interface{}) (JsonHelper, error) { - var body io.Reader - if data != nil { - switch val := data.(type) { - case string: - body = bytes.NewReader([]byte(val)) - case io.Reader: - body = val - case url.Values: - body = bytes.NewReader([]byte(val.Encode())) - default: - by, err := json.Marshal(data) - if err != nil { - return nil, err - } - body = bytes.NewReader(by) - } - - } - req, err := http.NewRequest(method, urlStr, body) - if err != nil { - return nil, err - } - req.Header.Add("Accept", "application/json") - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - jh, err := NewJsonHelperReader(resp.Body) - return jh, err -} - -// posts an application/json to url with body -// ie: type = application/json -func PostJson(postUrl string, data interface{}) (ret string, err error, resp *http.Response) { - var buf io.Reader - if data != nil { - switch val := data.(type) { - case string: - buf = bytes.NewBufferString(val) - case []byte: - buf = bytes.NewReader(val) - case json.RawMessage: - buf = bytes.NewReader([]byte(val)) - case io.Reader: - buf = val - case url.Values: - buf = bytes.NewBufferString(val.Encode()) - default: - by, err := json.Marshal(data) - if err != nil { - return "", err, nil - } - buf = bytes.NewReader(by) - } - } - - resp, err = http.Post(postUrl, "application/json", buf) - defer func() { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - }() - if err != nil { - Log(WARN, err.Error()) - return "", err, resp - } - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err, resp - } - - return string(bodyBytes), nil, resp -} - -// issues http delete an application/json to url with body -func DeleteJson(url, body string) (ret string, err error, resp *http.Response) { - //Post(url string, bodyType string, body io.Reader) - buf := bytes.NewBufferString(body) - Debug(buf.Len()) - req, err := http.NewRequest("DELETE", url, buf) - if err != nil { - Debug(err) - return - } - - req.Header.Add("Content-Type", "application/json") - resp, err = http.DefaultClient.Do(req) //(url, "application/json", buf) - defer func() { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - }() - if err != nil { - Log(WARN, err.Error()) - return "", err, resp - } - data, err := ioutil.ReadAll(resp.Body) - if err 
!= nil { - return "", err, resp - } - - return string(data), nil, resp -} - -// posts a www-form encoded form to url with body -func PostForm(url, body string) (ret string, err error, resp *http.Response) { - //Post(url string, bodyType string, body io.Reader) - buf := bytes.NewBufferString(body) - resp, err = http.Post(url, "application/x-www-form-urlencoded", buf) - defer func() { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - }() - if err != nil { - Log(WARN, url, " ", body, " ", err.Error()) - return "", err, resp - } - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err, resp - } - - return string(data), nil, resp -} - -// issues http put an application/json to url with optional body -func PutJson(url, body string) (ret string, err error, resp *http.Response) { - buf := bytes.NewBufferString(body) - req, err := http.NewRequest("PUT", url, buf) - if err != nil { - Debug(err) - return - } - req.Header.Add("Content-Type", "application/json") - resp, err = http.DefaultClient.Do(req) - defer func() { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - }() - if err != nil { - Log(WARN, err.Error()) - return "", err, resp - } - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err, resp - } - - return string(data), nil, resp -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper.go b/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper.go deleted file mode 100644 index dc3ee693b..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper.go +++ /dev/null @@ -1,694 +0,0 @@ -package gou - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "math" - "net/http" - "net/url" - "strconv" - "strings" - "unicode/utf8" -) - -// Convert a slice of bytes into an array by ensuring it is wrapped -// with [] -func MakeJsonList(b []byte) []byte { - if !bytes.HasPrefix(b, []byte{'['}) { - b = append([]byte{'['}, b...) 
- b = append(b, ']') - } - return b -} - -func JsonString(v interface{}) string { - b, err := json.Marshal(v) - if err != nil { - return `""` - } - return string(b) -} - -func firstNonWsRune(by []byte) (r rune, ok bool) { - for { - if len(by) == 0 { - return 0, false - } - r, numBytes := utf8.DecodeRune(by) - switch r { - case '\t', '\n', '\r', ' ': - by = by[numBytes:] // advance past the current whitespace rune and continue - continue - case utf8.RuneError: // This is returned when invalid UTF8 is found - return 0, false - } - return r, true - } - return 0, false -} - -// Determines if the bytes is a json array, only looks at prefix -// not parsing the entire thing -func IsJson(by []byte) bool { - firstRune, ok := firstNonWsRune(by) - if !ok { - return false - } - if firstRune == '[' || firstRune == '{' { - return true - } - return false -} - -// Determines if the bytes is a json array, only looks at prefix -// not parsing the entire thing -func IsJsonArray(by []byte) bool { - firstRune, ok := firstNonWsRune(by) - if !ok { - return false - } - if firstRune == '[' { - return true - } - return false -} - -func IsJsonObject(by []byte) bool { - firstRune, ok := firstNonWsRune(by) - if !ok { - return false - } - if firstRune == '{' { - return true - } - return false -} - -type JsonRawWriter struct { - bytes.Buffer -} - -func (m *JsonRawWriter) MarshalJSON() ([]byte, error) { - return m.Bytes(), nil -} - -func (m *JsonRawWriter) Raw() json.RawMessage { - return json.RawMessage(m.Bytes()) -} - -// A simple wrapper to help json data be consumed when not -// using Strongly typed structs. -type JsonInterface struct { - data interface{} -} - -// Encode returns its marshaled data as `[]byte` -func (j *JsonInterface) Encode() ([]byte, error) { - return j.MarshalJSON() -} - -// Implements the json.Marshaler interface. -func (j *JsonInterface) MarshalJSON() ([]byte, error) { - return json.Marshal(&j.data) -} - -// Implements the json.Unmarshal interface. 
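A condensed sketch of the `JsonInterface` wrapper in use, mirroring the checks in the deleted `jsonhelper_test.go` further below. The import path is again the upstream one from the vendor manifest, and the printed values are what those tests assert:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/araddon/gou"
)

func main() {
	// JsonInterface defers type decisions until read time, coercing the
	// underlying value on demand via the Sh ("short") accessors.
	var jim map[string]gou.JsonInterface
	if err := json.Unmarshal([]byte(`{"int":22, "floatstr":"22.2"}`), &jim); err != nil {
		panic(err)
	}

	fmt.Println(jim["int"].IntSh())        // expected: 22
	fmt.Println(jim["int"].StringSh())     // expected: 22 (int coerced to string)
	fmt.Println(jim["floatstr"].FloatSh()) // expected: 22.2 (string coerced to float)
}
```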
-func (j *JsonInterface) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &j.data) -} - -// Coerce to a String -func (j *JsonInterface) String() (string, error) { - return CoerceString(j.data) -} - -// Coerce to a string, may be zero length if missing, or zero length -func (j JsonInterface) StringSh() string { - val, _ := CoerceString(j.data) - return val -} - -// Coerce to Int -func (j *JsonInterface) Int() (int, error) { - return CoerceInt(j.data) -} - -// Coerce to Int, 0 returned if missing or zero -func (j JsonInterface) IntSh() int { - val, _ := CoerceInt(j.data) - return val -} - -// Coerce to Float, return err if needed -func (j *JsonInterface) Float() (float32, error) { - val, err := CoerceFloat(j.data) - return float32(val), err -} - -// Coerce to Float, 0 returned if 0 or missing -func (j JsonInterface) FloatSh() float32 { - val, _ := CoerceFloat(j.data) - return float32(val) -} - -// A wrapper around a map[string]interface{} to facilitate coercion -// of json data to what you want -// -// allows usage such as this -// -// jh := NewJsonHelper([]byte(`{ -// "name":"string", -// "ints":[1,5,9,11], -// "int":1, -// "int64":1234567890, -// "MaxSize" : 1048576, -// "strings":["string1"], -// "nested":{ -// "nest":"string2", -// "strings":["string1"], -// "int":2, -// "list":["value"], -// "nest2":{ -// "test":"good" -// } -// }, -// "nested2":[ -// {"sub":5} -// ] -// }`) -// -// i := jh.Int("nested.int") // 2 -// i2 := jh.Int("ints[1]") // 5 array position 1 from [1,5,9,11] -// s := jh.String("nested.nest") // "string2" -// -type JsonHelper map[string]interface{} - -func NewJsonHelper(b []byte) JsonHelper { - jh := make(JsonHelper) - json.Unmarshal(b, &jh) - return jh -} - -func NewJsonHelperReader(r io.Reader) (jh JsonHelper, err error) { - jh = make(JsonHelper) - err = json.NewDecoder(r).Decode(&jh) - return -} - -func NewJsonHelpers(b []byte) []JsonHelper { - var jhl []JsonHelper - json.Unmarshal(MakeJsonList(b), &jhl) - return jhl -} - -// Make a JsonHelper from http response. This will automatically -// close the response body -func NewJsonHelperFromResp(resp *http.Response) (JsonHelper, error) { - jh := make(JsonHelper) - if resp == nil || resp.Body == nil { - return jh, fmt.Errorf("No response or response body to read") - } - defer resp.Body.Close() - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - if len(respBytes) == 0 { - return jh, fmt.Errorf("No data in response") - } - if err := json.Unmarshal(respBytes, &jh); err != nil { - return jh, err - } - return jh, nil -} - -func jsonList(v interface{}) []interface{} { - switch v.(type) { - case []interface{}: - return v.([]interface{}) - } - return nil -} - -func jsonEntry(name string, v interface{}) (interface{}, bool) { - switch val := v.(type) { - case map[string]interface{}: - if root, ok := val[name]; ok { - return root, true - } else { - return nil, false - } - case JsonHelper: - return val.Get(name), true - case []interface{}: - return v, true - default: - Debug("no type? 
", name, " ", v) - return nil, false - } -} - -// Get the key (or keypath) value as interface, mostly used -// internally through String, etc methods -// -// jh.Get("name.subname") -// jh.Get("name/subname") -// jh.Get("name.arrayname[1]") -// jh.Get("name.arrayname[]") -func (j JsonHelper) Get(n string) interface{} { - var parts []string - if strings.Contains(n, "/") { - parts = strings.Split(n, "/") - if strings.HasPrefix(n, "/") && len(parts) > 0 { - parts = parts[1:] - } - } else { - parts = strings.Split(n, ".") - } - var root interface{} - var err error - var ok, isList, listEntry bool - var ln, st, idx int - for ict, name := range parts { - isList = strings.HasSuffix(name, "[]") - listEntry = strings.HasSuffix(name, "]") && !isList - ln, idx = len(name), -1 - if isList || listEntry { - st = strings.Index(name, "[") - idx, err = strconv.Atoi(name[st+1 : ln-1]) - name = name[:st] - } - if ict == 0 { - root, ok = j[name] - } else { - root, ok = jsonEntry(name, root) - } - //Debug(isList, listEntry, " ", name, " ", root, " ", ok, err) - if !ok { - if len(parts) > 0 { - // lets ensure the actual json-value doesn't have period in key - root, ok = j[n] - if !ok { - return nil - } else { - //Warnf("returning root %T %#v", root, root) - return root - } - } else { - return nil - } - - } - if isList { - return jsonList(root) - } else if listEntry && err == nil { - if lst := jsonList(root); lst != nil && len(lst) > idx { - root = lst[idx] - } else { - return nil - } - } - - } - return root -} - -// Get a Helper from a string path -func (j JsonHelper) Helper(n string) JsonHelper { - v := j.Get(n) - if v == nil { - return nil - } - switch vt := v.(type) { - case map[string]interface{}: - cn := JsonHelper{} - for n, val := range vt { - cn[n] = val - } - return cn - case map[string]string: - cn := JsonHelper{} - for n, val := range vt { - cn[n] = val - } - return cn - case JsonHelper: - return vt - default: - //Infof("wrong type: %T", v) - } - return nil -} - -// Get list of Helpers at given name. Trys to coerce into -// proper Helper type -func (j JsonHelper) Helpers(n string) []JsonHelper { - v := j.Get(n) - if v == nil { - return nil - } - switch val := v.(type) { - case []map[string]interface{}: - hl := make([]JsonHelper, 0) - for _, mapVal := range val { - hl = append(hl, mapVal) - } - return hl - case []interface{}: - jhl := make([]JsonHelper, 0) - for _, item := range val { - if jh, ok := item.(map[string]interface{}); ok { - jhl = append(jhl, jh) - } - } - return jhl - } - - return nil -} - -// Gets slice of interface{} -func (j JsonHelper) List(n string) []interface{} { - v := j.Get(n) - switch val := v.(type) { - case []string: - il := make([]interface{}, len(val)) - for i, val := range val { - il[i] = val - } - return il - case []interface{}: - return val - } - return nil -} - -func (j JsonHelper) String(n string) string { - if v := j.Get(n); v != nil { - val, _ := CoerceString(v) - return val - } - return "" -} -func (j JsonHelper) Strings(n string) []string { - if v := j.Get(n); v != nil { - //Debugf("Strings(%s) => %T %#v", n, v, v) - switch val := v.(type) { - case string: - return strings.Split(val, ",") - case []string: - //Debug("type []string") - return val - case []interface{}: - //Debug("Kind = []interface{} n=", n, " v=", v) - sva := make([]string, 0) - for _, av := range val { - switch aval := av.(type) { - case string: - sva = append(sva, aval) - default: - //Warnf("Kind ? 
%T v=%v", aval, aval) - } - } - return sva - default: - return []string{j.String(n)} - } - } - return nil -} -func (j JsonHelper) Ints(n string) []int { - v := j.Get(n) - if v == nil { - return nil - } - if sl, isSlice := v.([]interface{}); isSlice { - iva := make([]int, 0) - for _, av := range sl { - avAsInt, ok := valToInt(av) - if ok { - iva = append(iva, avAsInt) - } - } - return iva - } - return nil -} -func (j JsonHelper) StringSafe(n string) (string, bool) { - v := j.Get(n) - if v != nil { - if s, ok := v.(string); ok { - return s, ok - } - } - return "", false -} - -func (j JsonHelper) Int(n string) int { - i, ok := j.IntSafe(n) - if !ok { - return -1 - } - return i -} - -func (j JsonHelper) IntSafe(n string) (int, bool) { - v := j.Get(n) - return valToInt(v) -} - -func (j JsonHelper) Int64(n string) int64 { - i64, ok := j.Int64Safe(n) - if !ok { - return -1 - } - return i64 -} - -func (j JsonHelper) Int64Safe(n string) (int64, bool) { - v := j.Get(n) - return valToInt64(v) -} - -func (j JsonHelper) Float64(n string) float64 { - v := j.Get(n) - f64, err := CoerceFloat(v) - if err != nil { - return math.NaN() - } - return f64 -} - -func (j JsonHelper) Float64Safe(n string) (float64, bool) { - v := j.Get(n) - if v == nil { - return math.NaN(), true - } - fv, err := CoerceFloat(v) - if err != nil { - return math.NaN(), false - } - return fv, true -} - -func (j JsonHelper) Uint64(n string) uint64 { - v := j.Get(n) - if v != nil { - return CoerceUintShort(v) - } - return 0 -} - -func (j JsonHelper) Uint64Safe(n string) (uint64, bool) { - v := j.Get(n) - if v != nil { - if uv, err := CoerceUint(v); err == nil { - return uv, true - } - } - return 0, false -} - -func (j JsonHelper) BoolSafe(n string) (val bool, ok bool) { - v := j.Get(n) - if v != nil { - switch v.(type) { - case bool: - return v.(bool), true - case string: - if s := v.(string); len(s) > 0 { - if b, err := strconv.ParseBool(s); err == nil { - return b, true - } - } - } - } - return false, false -} - -func (j JsonHelper) Bool(n string) bool { - val, ok := j.BoolSafe(n) - if !ok { - return false - } - - return val -} - -func (j JsonHelper) Map(n string) map[string]interface{} { - v := j.Get(n) - if v == nil { - return nil - } - m, ok := v.(map[string]interface{}) - if !ok { - return nil - } - return m -} - -func (j JsonHelper) MapSafe(n string) (map[string]interface{}, bool) { - v := j.Get(n) - if v == nil { - return nil, false - } - m, ok := v.(map[string]interface{}) - if !ok { - return nil, false - } - return m, true -} - -func (j JsonHelper) PrettyJson() []byte { - jsonPretty, _ := json.MarshalIndent(j, " ", " ") - return jsonPretty -} -func (j JsonHelper) Keys() []string { - keys := make([]string, 0) - for key := range j { - keys = append(keys, key) - } - return keys -} -func (j JsonHelper) HasKey(name string) bool { - if val := j.Get(name); val != nil { - return true - } - return false -} - -// GobDecode overwrites the receiver, which must be a pointer, -// with the value represented by the byte slice, which was written -// by GobEncode, usually for the same concrete type. -// GobDecode([]byte) error -func (j *JsonHelper) GobDecode(data []byte) error { - var mv map[string]interface{} - if err := json.Unmarshal(data, &mv); err != nil { - return err - } - *j = JsonHelper(mv) - return nil -} -func (j *JsonHelper) GobEncode() ([]byte, error) { - by, err := json.Marshal(j) - return by, err -} - -// The following consts are from http://code.google.com/p/go-bit/ (Apache licensed). 
It -// lets us figure out how wide go ints are, and determine their max and min values. - -// Note the use of << to create an untyped constant. -const bitsPerWord = 32 << uint(^uint(0)>>63) - -// Implementation-specific size of int and uint in bits. -const BitsPerWord = bitsPerWord // either 32 or 64 - -// Implementation-specific integer limit values. -const ( - MaxInt = 1<<(BitsPerWord-1) - 1 // either 1<<31 - 1 or 1<<63 - 1 - MinInt = -MaxInt - 1 // either -1 << 31 or -1 << 63 - MaxUint = 1< 0 { - // uv[k] = sva - // } - case map[string]bool: - // what to do? - Info("not implemented: [string]bool") - case map[string]interface{}: - if len(x) > 0 { - if err := flattenJsonMap(uv, x, k+"."); err != nil { - return err - } - } - case string: - uv.Set(k, x) - case bool: - if x == true { - uv.Set(k, "t") - } else { - uv.Set(k, "f") - } - case int: - uv.Set(k, strconv.FormatInt(int64(x), 10)) - case int8: - uv.Set(k, strconv.FormatInt(int64(x), 10)) - case int16: - uv.Set(k, strconv.FormatInt(int64(x), 10)) - case int32: - uv.Set(k, strconv.FormatInt(int64(x), 10)) - case int64: - uv.Set(k, strconv.FormatInt(x, 10)) - case uint: - uv.Set(k, strconv.FormatUint(uint64(x), 10)) - case uint8: - uv.Set(k, strconv.FormatUint(uint64(x), 10)) - case uint16: - uv.Set(k, strconv.FormatUint(uint64(x), 10)) - case uint32: - uv.Set(k, strconv.FormatUint(uint64(x), 10)) - case uint64: - uv.Set(k, strconv.FormatUint(x, 10)) - case float32: - uv.Set(k, strconv.FormatFloat(float64(x), 'f', -1, 64)) - case float64: - uv.Set(k, strconv.FormatFloat(x, 'f', -1, 64)) - default: - // what types don't we support? - // []interface{} - } - return nil -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper_test.go b/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper_test.go deleted file mode 100644 index 1d1e05b9d..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/jsonhelper_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package gou - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "math" - "strings" - "testing" - - . 
"github.com/araddon/gou/goutest" - "github.com/bmizerany/assert" -) - -// go test -bench=".*" -// go test -run="(Util)" - -var ( - jh JsonHelper -) - -func init() { - SetupLogging("debug") - SetColorOutput() - //SetLogger(log.New(os.Stderr, "", log.Ltime|log.Lshortfile), "debug") - // create test data - json.Unmarshal([]byte(`{ - "name":"aaron", - "nullstring":null, - "ints":[1,2,3,4], - "int":1, - "intstr":"1", - "int64":1234567890, - "float64":123.456, - "float64str":"123.456", - "float64null": null, - "MaxSize" : 1048576, - "strings":["string1"], - "stringscsv":"string1,string2", - "nested":{ - "nest":"string2", - "strings":["string1"], - "int":2, - "list":["value"], - "nest2":{ - "test":"good" - } - }, - "nested2":[ - {"sub":2} - ], - "period.name":"value" - }`), &jh) -} - -func TestJsonRawWriter(t *testing.T) { - var buf bytes.Buffer - buf.WriteString(`"hello"`) - raw := json.RawMessage(buf.Bytes()) - bya, _ := json.Marshal(&buf) - Debug(string(bya)) - bya, _ = json.Marshal(&raw) - Debug(string(bya)) - - /* - bya, err := json.Marshal(buf) - Assert(string(bya) == `"hello"`, t, "Should be hello but was %s", string(bya)) - Debug(string(buf.Bytes()), err) - var jrw JsonRawWriter - jrw.WriteString(`"hello"`) - Debug(jrw.Raw()) - bya, err = json.Marshal(jrw.Raw()) - Assert(string(bya) == `"hello"`, t, "Should be hello but was %s", string(bya)) - Debug(string(jrw.Bytes()), err) - */ -} - -func TestJsonHelper(t *testing.T) { - - Assert(jh.String("name") == "aaron", t, "should get 'aaron' %s", jh.String("name")) - Assert(jh.String("nullstring") == "", t, "should get '' %s", jh.String("nullstring")) - - Assert(jh.Int("int") == 1, t, "get int ") - Assert(jh.Int("ints[0]") == 1, t, "get int from array %d", jh.Int("ints[0]")) - Assert(jh.Int("ints[2]") == 3, t, "get int from array %d", jh.Int("ints[0]")) - Assert(len(jh.Ints("ints")) == 4, t, "get int array %v", jh.Ints("ints")) - Assert(jh.Int64("int64") == 1234567890, t, "get int") - Assert(jh.Int("nested.int") == 2, t, "get int") - Assert(jh.String("nested.nest") == "string2", t, "should get string %s", jh.String("nested.nest")) - Assert(jh.String("nested.nest2.test") == "good", t, "should get string %s", jh.String("nested.nest2.test")) - Assert(jh.String("nested.list[0]") == "value", t, "get string from array") - Assert(jh.Int("nested2[0].sub") == 2, t, "get int from obj in array %d", jh.Int("nested2[0].sub")) - - Assert(jh.Int("MaxSize") == 1048576, t, "get int, test capitalization? 
") - sl := jh.Strings("strings") - Assert(len(sl) == 1 && sl[0] == "string1", t, "get strings ") - sl = jh.Strings("stringscsv") - Assert(len(sl) == 2 && sl[0] == "string1", t, "get strings ") - - i64, ok := jh.Int64Safe("int64") - Assert(ok, t, "int64safe ok") - Assert(i64 == 1234567890, t, "int64safe value") - - u64, ok := jh.Uint64Safe("int64") - Assert(ok, t, "uint64safe ok") - Assert(u64 == 1234567890, t, "int64safe value") - _, ok = jh.Uint64Safe("notexistent") - assert.Tf(t, !ok, "should not be ok") - _, ok = jh.Uint64Safe("name") - assert.Tf(t, !ok, "should not be ok") - - i, ok := jh.IntSafe("int") - Assert(ok, t, "intsafe ok") - Assert(i == 1, t, "intsafe value") - - l := jh.List("nested2") - Assert(len(l) == 1, t, "get list") - - fv, ok := jh.Float64Safe("name") - assert.Tf(t, !ok, "floatsafe not ok") - fv, ok = jh.Float64Safe("float64") - assert.Tf(t, ok, "floatsafe ok") - assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) - fv = jh.Float64("float64") - assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) - fv, ok = jh.Float64Safe("float64str") - assert.Tf(t, ok, "floatsafe ok") - assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) - fv = jh.Float64("float64str") - assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) - fv, ok = jh.Float64Safe("float64null") - assert.Tf(t, ok, "float64null ok") - assert.Tf(t, math.IsNaN(fv), "float64null expected Nan but got %v", fv) - fv = jh.Float64("float64null") - assert.Tf(t, math.IsNaN(fv), "float64null expected Nan but got %v", fv) - - jhm := jh.Helpers("nested2") - Assert(len(jhm) == 1, t, "get list of helpers") - Assert(jhm[0].Int("sub") == 2, t, "Should get list of helpers") -} - -func TestJsonInterface(t *testing.T) { - - var jim map[string]JsonInterface - err := json.Unmarshal([]byte(`{ - "nullstring":null, - "string":"string", - "int":22, - "float":22.2, - "floatstr":"22.2", - "intstr":"22" - }`), &jim) - Assert(err == nil, t, "no error:%v ", err) - Assert(jim["nullstring"].StringSh() == "", t, "nullstring: %v", jim["nullstring"]) - Assert(jim["string"].StringSh() == "string", t, "nullstring: %v", jim["string"]) - Assert(jim["int"].IntSh() == 22, t, "int: %v", jim["int"]) - Assert(jim["int"].StringSh() == "22", t, "int->string: %v", jim["int"]) - Assert(jim["int"].FloatSh() == float32(22), t, "int->float: %v", jim["int"]) - Assert(jim["float"].FloatSh() == 22.2, t, "float: %v", jim["float"]) - Assert(jim["float"].StringSh() == "22.2", t, "float->string: %v", jim["float"]) - Assert(jim["float"].IntSh() == 22, t, "float->int: %v", jim["float"]) - Assert(jim["intstr"].IntSh() == 22, t, "intstr: %v", jim["intstr"]) - Assert(jim["intstr"].FloatSh() == float32(22), t, "intstr->float: %v", jim["intstr"]) -} - -func TestJsonCoercion(t *testing.T) { - assert.Tf(t, jh.Int("intstr") == 1, "get string as int %s", jh.String("intstr")) - assert.Tf(t, jh.String("int") == "1", "get int as string %s", jh.String("int")) - assert.Tf(t, jh.Int("notint") == -1, "get non existent int = 0??? ") -} - -func TestJsonPathNotation(t *testing.T) { - // Now lets test xpath type syntax - assert.Tf(t, jh.Int("/MaxSize") == 1048576, "get int, test capitalization? 
") - assert.Tf(t, jh.String("/nested/nest") == "string2", "should get string %s", jh.String("/nested/nest")) - assert.Tf(t, jh.String("/nested/list[0]") == "value", "get string from array") - // note this one has period in name - assert.Tf(t, jh.String("/period.name") == "value", "test period in name ") -} - -func TestFromReader(t *testing.T) { - raw := `{"testing": 123}` - reader := strings.NewReader(raw) - jh, err := NewJsonHelperReader(reader) - assert.Tf(t, err == nil, "Unexpected error decoding json: %s", err) - assert.Tf(t, jh.Int("testing") == 123, "Unexpected value in json: %d", jh.Int("testing")) -} - -func TestJsonHelperGobEncoding(t *testing.T) { - raw := `{"testing": 123,"name":"bob & more"}` - reader := strings.NewReader(raw) - jh, err := NewJsonHelperReader(reader) - assert.Tf(t, err == nil, "Unexpected error decoding gob: %s", err) - assert.Tf(t, jh.Int("testing") == 123, "Unexpected value in gob: %d", jh.Int("testing")) - var buf bytes.Buffer - err = gob.NewEncoder(&buf).Encode(&jh) - assert.T(t, err == nil, err) - - var jhNew JsonHelper - err = gob.NewDecoder(&buf).Decode(&jhNew) - assert.T(t, err == nil, err) - assert.Tf(t, jhNew.Int("testing") == 123, "Unexpected value in gob: %d", jhNew.Int("testing")) - assert.Tf(t, jhNew.String("name") == "bob & more", "Unexpected value in gob: %d", jhNew.String("name")) - - buf2 := bytes.Buffer{} - gt := GobTest{"Hello", jh} - err = gob.NewEncoder(&buf2).Encode(>) - assert.T(t, err == nil, err) - - var gt2 GobTest - err = gob.NewDecoder(&buf2).Decode(>2) - assert.T(t, err == nil, err) - assert.Tf(t, gt2.Name == "Hello", "Unexpected value in gob: %d", gt2.Name) - assert.Tf(t, gt2.Data.Int("testing") == 123, "Unexpected value in gob: %d", gt2.Data.Int("testing")) - assert.Tf(t, gt2.Data.String("name") == "bob & more", "Unexpected value in gob: %d", gt2.Data.String("name")) -} - -type GobTest struct { - Name string - Data JsonHelper -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/log.go b/services/templeton/vendor/src/github.com/araddon/gou/log.go deleted file mode 100644 index 2a01805f7..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/log.go +++ /dev/null @@ -1,412 +0,0 @@ -package gou - -import ( - "fmt" - "log" - "os" - "runtime" - "strings" - "sync" - "time" -) - -const ( - NOLOGGING = -1 - FATAL = 0 - ERROR = 1 - WARN = 2 - INFO = 3 - DEBUG = 4 -) - -/* -https://github.com/mewkiz/pkg/tree/master/term -RED = '\033[0;1;31m' -GREEN = '\033[0;1;32m' -YELLOW = '\033[0;1;33m' -BLUE = '\033[0;1;34m' -MAGENTA = '\033[0;1;35m' -CYAN = '\033[0;1;36m' -WHITE = '\033[0;1;37m' -DARK_MAGENTA = '\033[0;35m' -ANSI_RESET = '\033[0m' -LogColor = map[int]string{FATAL: "\033[0m\033[37m", - ERROR: "\033[0m\033[31m", - WARN: "\033[0m\033[33m", - INFO: "\033[0m\033[32m", - DEBUG: "\033[0m\033[34m"} - -\e]PFdedede -*/ - -var ( - LogLevel int = ERROR - EMPTY struct{} - ErrLogLevel int = ERROR - logger *log.Logger - loggerErr *log.Logger - LogColor = map[int]string{FATAL: "\033[0m\033[37m", - ERROR: "\033[0m\033[31m", - WARN: "\033[0m\033[33m", - INFO: "\033[0m\033[35m", - DEBUG: "\033[0m\033[34m"} - LogPrefix = map[int]string{ - FATAL: "[FATAL] ", - ERROR: "[ERROR] ", - WARN: "[WARN] ", - INFO: "[INFO] ", - DEBUG: "[DEBUG] ", - } - escapeNewlines bool = false - postFix = "" //\033[0m - LogLevelWords map[string]int = map[string]int{"fatal": 0, "error": 1, "warn": 2, "info": 3, "debug": 4, "none": -1} - logThrottles = make(map[string]*Throttler) - throttleMu sync.Mutex -) - -// Setup default logging to Stderr, 
equivalent to: -// -// gou.SetLogger(log.New(os.Stderr, "", log.Ltime|log.Lshortfile), "debug") -func SetupLogging(lvl string) { - SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile|log.Lmicroseconds), strings.ToLower(lvl)) -} - -// Setup default logging to Stderr, equivalent to: -// -// gou.SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile|log.Lmicroseconds), level) -func SetupLoggingLong(lvl string) { - SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Llongfile|log.Lmicroseconds), strings.ToLower(lvl)) -} - -// Setup colorized output if this is a terminal -func SetColorIfTerminal() { - if IsTerminal() { - SetColorOutput() - } -} - -// Setup colorized output -func SetColorOutput() { - for lvl, color := range LogColor { - LogPrefix[lvl] = color - } - postFix = "\033[0m" -} - -//Set whether to escape newline characters in log messages -func SetEscapeNewlines(en bool) { - escapeNewlines = en -} - -// Setup default log output to go to a dev/null -// -// log.SetOutput(new(DevNull)) -func DiscardStandardLogger() { - log.SetOutput(new(DevNull)) -} - -// you can set a logger, and log level,most common usage is: -// -// gou.SetLogger(log.New(os.Stdout, "", log.LstdFlags), "debug") -// -// loglevls: debug, info, warn, error, fatal -// Note, that you can also set a seperate Error Log Level -func SetLogger(l *log.Logger, logLevel string) { - logger = l - LogLevelSet(logLevel) -} -func GetLogger() *log.Logger { - return logger -} - -// you can set a logger, and log level. this is for errors, and assumes -// you are logging to Stderr (seperate from stdout above), allowing you to seperate -// debug&info logging from errors -// -// gou.SetLogger(log.New(os.Stderr, "", log.LstdFlags), "debug") -// -// loglevls: debug, info, warn, error, fatal -func SetErrLogger(l *log.Logger, logLevel string) { - loggerErr = l - if lvl, ok := LogLevelWords[logLevel]; ok { - ErrLogLevel = lvl - } -} -func GetErrLogger() *log.Logger { - return logger -} - -// sets the log level from a string -func LogLevelSet(levelWord string) { - if lvl, ok := LogLevelWords[levelWord]; ok { - LogLevel = lvl - } -} - -// Log at debug level -func Debug(v ...interface{}) { - if LogLevel >= 4 { - DoLog(3, DEBUG, fmt.Sprint(v...)) - } -} - -// Debug log formatted -func Debugf(format string, v ...interface{}) { - if LogLevel >= 4 { - DoLog(3, DEBUG, fmt.Sprintf(format, v...)) - } -} - -// Log at info level -func Info(v ...interface{}) { - if LogLevel >= 3 { - DoLog(3, INFO, fmt.Sprint(v...)) - } -} - -// info log formatted -func Infof(format string, v ...interface{}) { - if LogLevel >= 3 { - DoLog(3, INFO, fmt.Sprintf(format, v...)) - } -} - -// Log at warn level -func Warn(v ...interface{}) { - if LogLevel >= 2 { - DoLog(3, WARN, fmt.Sprint(v...)) - } -} - -// Debug log formatted -func Warnf(format string, v ...interface{}) { - if LogLevel >= 2 { - DoLog(3, WARN, fmt.Sprintf(format, v...)) - } -} - -// Log at error level -func Error(v ...interface{}) { - if LogLevel >= 1 { - DoLog(3, ERROR, fmt.Sprint(v...)) - } -} - -// Error log formatted -func Errorf(format string, v ...interface{}) { - if LogLevel >= 1 { - DoLog(3, ERROR, fmt.Sprintf(format, v...)) - } -} - -// Log this error, and return error object -func LogErrorf(format string, v ...interface{}) error { - err := fmt.Errorf(format, v...) 
- if LogLevel >= 1 { - DoLog(3, ERROR, err.Error()) - } - return err -} - -// Log to logger if setup -// Log(ERROR, "message") -func Log(logLvl int, v ...interface{}) { - if LogLevel >= logLvl { - DoLog(3, logLvl, fmt.Sprint(v...)) - } -} - -// Log to logger if setup, grab a stack trace and add that as well -// -// u.LogTracef(u.ERROR, "message %s", varx) -// -func LogTracef(logLvl int, format string, v ...interface{}) { - if LogLevel >= logLvl { - // grab a stack trace - stackBuf := make([]byte, 6000) - stackBufLen := runtime.Stack(stackBuf, false) - stackTraceStr := string(stackBuf[0:stackBufLen]) - parts := strings.Split(stackTraceStr, "\n") - if len(parts) > 1 { - v = append(v, strings.Join(parts[3:], "\n")) - } - DoLog(3, logLvl, fmt.Sprintf(format+"\n%v", v...)) - } -} - -// Log to logger if setup, grab a stack trace and add that as well -// -// u.LogTracef(u.ERROR, "message %s", varx) -// -func LogTraceDf(logLvl, lineCt int, format string, v ...interface{}) { - if LogLevel >= logLvl { - // grab a stack trace - stackBuf := make([]byte, 6000) - stackBufLen := runtime.Stack(stackBuf, false) - stackTraceStr := string(stackBuf[0:stackBufLen]) - parts := strings.Split(stackTraceStr, "\n") - if len(parts) > 1 { - if (len(parts) - 3) > lineCt { - parts = parts[3 : 3+lineCt] - parts2 := make([]string, 0, len(parts)/2) - for i := 1; i < len(parts); i = i + 2 { - parts2 = append(parts2, parts[i]) - } - v = append(v, strings.Join(parts2, "\n")) - //v = append(v, strings.Join(parts[3:3+lineCt], "\n")) - } else { - v = append(v, strings.Join(parts[3:], "\n")) - } - } - DoLog(3, logLvl, fmt.Sprintf(format+"\n%v", v...)) - } -} - -// Throttle logging based on key, such that key would never occur more than -// @limit times per hour -// -// LogThrottleKey(u.ERROR, 1,"error_that_happens_a_lot" "message %s", varx) -// -func LogThrottleKey(logLvl, limit int, key, format string, v ...interface{}) { - if LogLevel >= logLvl { - throttleMu.Lock() - th, ok := logThrottles[key] - if !ok { - th = NewThrottler(limit, 3600*time.Second) - logThrottles[key] = th - } - if th.Throttle() { - throttleMu.Unlock() - return - } - throttleMu.Unlock() - DoLog(3, logLvl, fmt.Sprintf(format, v...)) - } -} - -// Throttle logging based on @format as a key, such that key would never occur more than -// @limit times per hour -// -// LogThrottle(u.ERROR, 1, "message %s", varx) -// -func LogThrottle(logLvl, limit int, format string, v ...interface{}) { - if LogLevel >= logLvl { - throttleMu.Lock() - th, ok := logThrottles[format] - if !ok { - th = NewThrottler(limit, 3600*time.Second) - logThrottles[format] = th - } - if th.Throttle() { - throttleMu.Unlock() - return - } - throttleMu.Unlock() - DoLog(3, logLvl, fmt.Sprintf(format, v...)) - } -} - -// Throttle logging based on @format as a key, such that key would never occur more than -// @limit times per hour -// -// LogThrottleD(5, u.ERROR, 1, "message %s", varx) -// -func LogThrottleD(depth, logLvl, limit int, format string, v ...interface{}) { - if LogLevel >= logLvl { - throttleMu.Lock() - th, ok := logThrottles[format] - if !ok { - th = NewThrottler(limit, 3600*time.Second) - logThrottles[format] = th - } - if th.Throttle() { - throttleMu.Unlock() - return - } - throttleMu.Unlock() - DoLog(depth, logLvl, fmt.Sprintf(format, v...)) - } -} - -// Log to logger if setup -// Logf(ERROR, "message %d", 20) -func Logf(logLvl int, format string, v ...interface{}) { - if LogLevel >= logLvl { - DoLog(3, logLvl, fmt.Sprintf(format, v...)) - } -} - -// Log to logger if setup -// 
LogP(ERROR, "prefix", "message", anyItems, youWant) -func LogP(logLvl int, prefix string, v ...interface{}) { - if ErrLogLevel >= logLvl && loggerErr != nil { - loggerErr.Output(3, prefix+LogPrefix[logLvl]+fmt.Sprint(v...)+postFix) - } else if LogLevel >= logLvl && logger != nil { - logger.Output(3, prefix+LogPrefix[logLvl]+fmt.Sprint(v...)+postFix) - } -} - -// Log to logger if setup with a prefix -// LogPf(ERROR, "prefix", "formatString %s %v", anyItems, youWant) -func LogPf(logLvl int, prefix string, format string, v ...interface{}) { - if ErrLogLevel >= logLvl && loggerErr != nil { - loggerErr.Output(3, prefix+LogPrefix[logLvl]+fmt.Sprintf(format, v...)+postFix) - } else if LogLevel >= logLvl && logger != nil { - logger.Output(3, prefix+LogPrefix[logLvl]+fmt.Sprintf(format, v...)+postFix) - } -} - -// When you want to use the log short filename flag, and want to use -// the lower level logging functions (say from an *Assert* type function) -// you need to modify the stack depth: -// -// func init() {} -// SetLogger(log.New(os.Stderr, "", log.Ltime|log.Lshortfile|log.Lmicroseconds), lvl) -// } -// -// func assert(t *testing.T, myData) { -// // we want log line to show line that called this assert, not this line -// LogD(5, DEBUG, v...) -// } -func LogD(depth int, logLvl int, v ...interface{}) { - if LogLevel >= logLvl { - DoLog(depth, logLvl, fmt.Sprint(v...)) - } -} - -// Low level log with depth , level, message and logger -func DoLog(depth, logLvl int, msg string) { - if escapeNewlines { - msg = EscapeNewlines(msg) - } - if ErrLogLevel >= logLvl && loggerErr != nil { - loggerErr.Output(depth, LogPrefix[logLvl]+msg+postFix) - } else if LogLevel >= logLvl && logger != nil { - logger.Output(depth, LogPrefix[logLvl]+msg+postFix) - } -} - -type winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - _TIOCGWINSZ = 0x5413 // OSX 1074295912 -) - -//http://play.golang.org/p/5LIA41Iqfp -// Dummy discard, satisfies io.Writer without importing io or os. -type DevNull struct{} - -func (DevNull) Write(p []byte) (int, error) { - return len(p), nil -} - -//Replace standard newline characters with escaped newlines so long msgs will -//remain one line. -func EscapeNewlines(str string) string { - return strings.Replace(str, "\n", "\\n", -1) -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/log_unix.go b/services/templeton/vendor/src/github.com/araddon/gou/log_unix.go deleted file mode 100644 index 6d4851cf5..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/log_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !windows - -package gou - -import ( - "syscall" - "unsafe" -) - -// Determine is this process is running in a Terminal or not? -func IsTerminal() bool { - ws := &winsize{} - isTerm := true - defer func() { - if r := recover(); r != nil { - isTerm = false - } - }() - // This blows up on windows - retCode, _, _ := syscall.Syscall(syscall.SYS_IOCTL, - uintptr(syscall.Stdin), - uintptr(_TIOCGWINSZ), - uintptr(unsafe.Pointer(ws))) - - if int(retCode) == -1 { - return false - } - return isTerm -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/log_windows.go b/services/templeton/vendor/src/github.com/araddon/gou/log_windows.go deleted file mode 100644 index 12713de91..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/log_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package gou - -// Determine is this process is running in a Terminal or not? 
-func IsTerminal() bool { - return false // TODO Needs correct implementation on Windows -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/testutil.go b/services/templeton/vendor/src/github.com/araddon/gou/testutil.go deleted file mode 100644 index fe92c6f1f..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/testutil.go +++ /dev/null @@ -1,75 +0,0 @@ -package gou - -import ( - "os" - "time" -) - -var ( - //finished chan bool - lastTest time.Time = time.Now() - stopper func() = func() {} -) - -// Wait for condition (defined by func) to be true -// this is mostly for testing, but a utility to -// create a ticker checking back every 100 ms to see -// if something (the supplied check func) is done -// -// WaitFor(func() bool { -// return ctr.Ct == 0 -// },10) -// timeout (in seconds) is the last arg -func WaitFor(check func() bool, timeoutSecs int) { - timer := time.NewTicker(100 * time.Millisecond) - tryct := 0 - for range timer.C { - if check() { - timer.Stop() - break - } - if tryct >= timeoutSecs*10 { - timer.Stop() - break - } - tryct++ - } -} - -// Use this in combo with StopCheck() for test functions that must start -// processes such as -func SetStopper(f func()) { - stopper = f -} - -// take two floats, compare, need to be within 2% -func CloseEnuf(a, b float64) bool { - c := a / b - if c > .98 && c < 1.02 { - return true - } - return false -} - -// take two ints, compare, need to be within 5% -func CloseInt(a, b int) bool { - c := float64(a) / float64(b) - if c >= .95 && c <= 1.05 { - return true - } - return false -} - -func StartTest() { - lastTest = time.Now() -} - -func StopCheck() { - t := time.Now() - if lastTest.Add(time.Millisecond*1000).UnixNano() < t.UnixNano() { - Log(INFO, "Stopping Test ", lastTest.Unix()) - //finished <- true - stopper() - os.Exit(0) - } -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/throttle.go b/services/templeton/vendor/src/github.com/araddon/gou/throttle.go deleted file mode 100644 index 8d1b3c34d..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/throttle.go +++ /dev/null @@ -1,56 +0,0 @@ -package gou - -import ( - "time" -) - -type Throttler struct { - - // Limit to this events/per - maxPer float64 - per float64 - - // Last Event - last time.Time - - // How many events are allowed left to happen? - // Starts at limit, decrements down - allowance float64 -} - -// new Throttler that will tell you to limit or not based -// on given @max events @per duration -func NewThrottler(max int, per time.Duration) *Throttler { - return &Throttler{ - maxPer: float64(max), - allowance: float64(max), - last: time.Now(), - per: per.Seconds(), - } -} - -// Should we limit this because we are above rate? 
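Before the `Throttle` method itself, a small standalone sketch of this rate limiter's contract, patterned on the deleted `throttle_test.go`. The exact split between passed and throttled calls assumes the loop runs well inside the one-second window:

```go
package main

import (
	"fmt"
	"time"

	"github.com/araddon/gou"
)

func main() {
	// Allow at most 5 events per second; once the allowance is spent,
	// Throttle() returns true (do throttle) until enough time has elapsed
	// for the token-bucket allowance to refill.
	th := gou.NewThrottler(5, time.Second)

	throttled := 0
	for i := 0; i < 20; i++ {
		if th.Throttle() {
			throttled++
		}
	}
	// In a tight loop, roughly the first 5 calls pass and the remaining
	// 15 report true, matching the behavior asserted by the deleted test.
	fmt.Println("throttled:", throttled)
}
```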
-func (r *Throttler) Throttle() bool { - - if r.maxPer == 0 { - return false - } - - // http://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm - now := time.Now() - elapsed := float64(now.Sub(r.last).Nanoseconds()) / 1e9 // seconds - r.last = now - r.allowance += elapsed * (r.maxPer / r.per) - - //Infof("maxRate: %v cur: %v elapsed:%-6.6f incr: %v", r.maxPer, int(r.allowance), elapsed, elapsed*float64(r.maxPer)) - if r.allowance > r.maxPer { - r.allowance = r.maxPer - } - - if r.allowance <= 1.0 { - return true // do throttle/limit - } - - r.allowance -= 1.0 - return false // dont throttle -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/throttle_test.go b/services/templeton/vendor/src/github.com/araddon/gou/throttle_test.go deleted file mode 100644 index 7c654d47b..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/throttle_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package gou - -import ( - "testing" - "time" - - "github.com/bmizerany/assert" -) - -func TestThrottleer(t *testing.T) { - th := NewThrottler(10, 10*time.Second) - for i := 0; i < 10; i++ { - assert.Tf(t, th.Throttle() == false, "Should not throttle %v", i) - time.Sleep(time.Millisecond * 10) - } - throttled := 0 - th = NewThrottler(10, 1*time.Second) - // We are going to loop 20 times, first 10 should make it, next 10 throttled - for i := 0; i < 20; i++ { - LogThrottleKey(WARN, 10, "throttle", "hello %v", i) - if th.Throttle() { - throttled += 1 - } - } - assert.Tf(t, throttled == 10, "Should throttle 10 of 20 requests: %v", throttled) - // Now sleep for 1 second so that we should - // no longer be throttled - time.Sleep(time.Second * 1) - assert.Tf(t, th.Throttle() == false, "We should not have been throttled") -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/uid.go b/services/templeton/vendor/src/github.com/araddon/gou/uid.go deleted file mode 100644 index 6afa66ad5..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/uid.go +++ /dev/null @@ -1,94 +0,0 @@ -package gou - -import ( - "crypto/md5" - "crypto/rand" - "encoding/binary" - "fmt" - "io" - "os" - "strconv" - "sync/atomic" - "time" -) - -const ( - //2013-2-3 - ourEpoch = uint32(1359931242) -) - -func init() { - initHostPidId() -} - -/* -Special thanks to ideas from Mgo, and Noeqd, this is somewhat inbetween them -https://github.com/bmizerany/noeqd - -It is a roughly sortable UID, but uses machine specific info (host, processid) -as part of the uid so each machine *will* have unique id's - -The host+processid is 3 bytes - -*/ - -// uidCounter is an atomically incremented each time we created -// a new uid within given ms time window -var uidCounter uint32 = 0 - -// hostPidId stores the generated hostPid -var hostPidId []byte - -// initHostPidId generates a machine-process specific id by using hostname -// and processid -func initHostPidId() { - var sum [4]byte - hostB := sum[:] - host, err := os.Hostname() - if err != nil { - // if we cannot get hostname, just use a random set of bytes - _, err2 := io.ReadFull(rand.Reader, hostB) - if err2 != nil { - panic(fmt.Errorf("cannot get hostname: %v; %v", err, err2)) - } - } else { - hw := md5.New() - hw.Write([]byte(host)) - copy(hostB, hw.Sum(nil)) - } - pid := os.Getpid() - hostI := binary.BigEndian.Uint32(hostB) - uid := uint32(pid) + uint32(hostI) - binary.BigEndian.PutUint32(hostB, uid) - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, uid) - hostPidId = b[:] -} - -// uid is a 64 bit int uid -type Uid uint64 - -// Create 
a new uint64 unique id -func NewUid() uint64 { - b := make([]byte, 8) - ts := uint32(time.Now().Unix()) - ourEpoch - - // Timestamp, 4 bytes, big endian - binary.BigEndian.PutUint32(b, ts) - //Debugf("ts=%v b=%v", ts, b) - // first 3 bytes of host/pid - b[4] = hostPidId[2] - b[5] = hostPidId[3] - b[6] = hostPidId[3] - // Increment, 2 bytes, big endian - i := atomic.AddUint32(&uidCounter, 1) - //b[6] = byte(i >> 8) - b[7] = byte(i) - ui := binary.BigEndian.Uint64(b) - //Debugf("ui=%d b=%v ", ui, b) - return ui -} - -func (u *Uid) String() string { - return strconv.FormatUint(uint64(*u), 10) -} diff --git a/services/templeton/vendor/src/github.com/araddon/gou/uid_test.go b/services/templeton/vendor/src/github.com/araddon/gou/uid_test.go deleted file mode 100644 index 7896e7c74..000000000 --- a/services/templeton/vendor/src/github.com/araddon/gou/uid_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package gou - -import ( - "testing" -) - -func TestUid(t *testing.T) { - u := NewUid() - Debug(u) - Debug(NewUid()) -} diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/LICENSE b/services/templeton/vendor/src/github.com/bitly/go-hostpool/LICENSE deleted file mode 100644 index f24db89c4..000000000 --- a/services/templeton/vendor/src/github.com/bitly/go-hostpool/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Bitly - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/README.md b/services/templeton/vendor/src/github.com/bitly/go-hostpool/README.md deleted file mode 100644 index 7f4437277..000000000 --- a/services/templeton/vendor/src/github.com/bitly/go-hostpool/README.md +++ /dev/null @@ -1,17 +0,0 @@ -go-hostpool -=========== - -A Go package to intelligently and flexibly pool among multiple hosts from your Go application. -Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are -avoided. 
-Usage example:
-
-```go
-hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{})
-hostResponse := hp.Get()
-hostname := hostResponse.Host()
-var err error // (make a request with hostname)
-hostResponse.Mark(err)
-```
-
-View more detailed documentation on [godoc.org](http://godoc.org/github.com/bitly/go-hostpool)
diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_greedy.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_greedy.go
deleted file mode 100644
index 6976ba711..000000000
--- a/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_greedy.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package hostpool
-
-import (
-	"log"
-	"math/rand"
-	"time"
-)
-
-type epsilonHostPoolResponse struct {
-	standardHostPoolResponse
-	started time.Time
-	ended   time.Time
-}
-
-func (r *epsilonHostPoolResponse) Mark(err error) {
-	r.Do(func() {
-		r.ended = time.Now()
-		doMark(err, r)
-	})
-}
-
-type epsilonGreedyHostPool struct {
-	standardHostPool       // TODO - would be nifty if we could embed HostPool and Locker interfaces
-	epsilon                float32 // this is our exploration factor
-	decayDuration          time.Duration
-	EpsilonValueCalculator // embed the epsilonValueCalculator
-	timer
-	quit chan bool
-}
-
-// Construct an Epsilon Greedy HostPool
-//
-// Epsilon Greedy is an algorithm that allows HostPool not only to track failure state,
-// but also to learn about "better" options in terms of speed, and to pick from available hosts
-// based on how well they perform. This gives a weighted request rate to better
-// performing hosts, while still distributing requests to all hosts (proportionate to their performance).
-// The interface is the same as the standard HostPool, but be sure to mark the HostResponse immediately
-// after executing the request to the host, as that will stop the implicitly running request timer.
-//
-// A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132
-//
-// To compute the weighting scores, we perform a weighted average of recent response times, over the course of
-// `decayDuration`. decayDuration may be set to 0 to use the default value of 5 minutes.
-// We then use the supplied EpsilonValueCalculator to calculate a score from that weighted average response time.
-func NewEpsilonGreedy(hosts []string, decayDuration time.Duration, calc EpsilonValueCalculator) HostPool {
-
-	if decayDuration <= 0 {
-		decayDuration = defaultDecayDuration
-	}
-	stdHP := New(hosts).(*standardHostPool)
-	p := &epsilonGreedyHostPool{
-		standardHostPool:       *stdHP,
-		epsilon:                float32(initialEpsilon),
-		decayDuration:          decayDuration,
-		EpsilonValueCalculator: calc,
-		timer:                  &realTimer{},
-		quit:                   make(chan bool),
-	}
-
-	// allocate structures
-	for _, h := range p.hostList {
-		h.epsilonCounts = make([]int64, epsilonBuckets)
-		h.epsilonValues = make([]int64, epsilonBuckets)
-	}
-	go p.epsilonGreedyDecay()
-	return p
-}
-
-func (p *epsilonGreedyHostPool) Close() {
-	// No need to do p.quit <- true as close(p.quit) does the trick.
- close(p.quit) -} - -func (p *epsilonGreedyHostPool) SetEpsilon(newEpsilon float32) { - p.Lock() - defer p.Unlock() - p.epsilon = newEpsilon -} - -func (p *epsilonGreedyHostPool) epsilonGreedyDecay() { - durationPerBucket := p.decayDuration / epsilonBuckets - ticker := time.NewTicker(durationPerBucket) - for { - select { - case <-p.quit: - ticker.Stop() - return - case <-ticker.C: - p.performEpsilonGreedyDecay() - } - } -} -func (p *epsilonGreedyHostPool) performEpsilonGreedyDecay() { - p.Lock() - for _, h := range p.hostList { - h.epsilonIndex += 1 - h.epsilonIndex = h.epsilonIndex % epsilonBuckets - h.epsilonCounts[h.epsilonIndex] = 0 - h.epsilonValues[h.epsilonIndex] = 0 - } - p.Unlock() -} - -func (p *epsilonGreedyHostPool) Get() HostPoolResponse { - p.Lock() - defer p.Unlock() - host := p.getEpsilonGreedy() - started := time.Now() - return &epsilonHostPoolResponse{ - standardHostPoolResponse: standardHostPoolResponse{host: host, pool: p}, - started: started, - } -} - -func (p *epsilonGreedyHostPool) getEpsilonGreedy() string { - var hostToUse *hostEntry - - // this is our exploration phase - if rand.Float32() < p.epsilon { - p.epsilon = p.epsilon * epsilonDecay - if p.epsilon < minEpsilon { - p.epsilon = minEpsilon - } - return p.getRoundRobin() - } - - // calculate values for each host in the 0..1 range (but not ormalized) - var possibleHosts []*hostEntry - now := time.Now() - var sumValues float64 - for _, h := range p.hostList { - if h.canTryHost(now) { - v := h.getWeightedAverageResponseTime() - if v > 0 { - ev := p.CalcValueFromAvgResponseTime(v) - h.epsilonValue = ev - sumValues += ev - possibleHosts = append(possibleHosts, h) - } - } - } - - if len(possibleHosts) != 0 { - // now normalize to the 0..1 range to get a percentage - for _, h := range possibleHosts { - h.epsilonPercentage = h.epsilonValue / sumValues - } - - // do a weighted random choice among hosts - ceiling := 0.0 - pickPercentage := rand.Float64() - for _, h := range possibleHosts { - ceiling += h.epsilonPercentage - if pickPercentage <= ceiling { - hostToUse = h - break - } - } - } - - if hostToUse == nil { - if len(possibleHosts) != 0 { - log.Println("Failed to randomly choose a host, Dan loses") - } - return p.getRoundRobin() - } - - if hostToUse.dead { - hostToUse.willRetryHost(p.maxRetryInterval) - } - return hostToUse.host -} - -func (p *epsilonGreedyHostPool) markSuccess(hostR HostPoolResponse) { - // first do the base markSuccess - a little redundant with host lookup but cleaner than repeating logic - p.standardHostPool.markSuccess(hostR) - eHostR, ok := hostR.(*epsilonHostPoolResponse) - if !ok { - log.Printf("Incorrect type in eps markSuccess!") // TODO reflection to print out offending type - return - } - host := eHostR.host - duration := p.between(eHostR.started, eHostR.ended) - - p.Lock() - defer p.Unlock() - h, ok := p.hosts[host] - if !ok { - log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) - } - h.epsilonCounts[h.epsilonIndex]++ - h.epsilonValues[h.epsilonIndex] += int64(duration.Seconds() * 1000) -} - -// --- timer: this just exists for testing - -type timer interface { - between(time.Time, time.Time) time.Duration -} - -type realTimer struct{} - -func (rt *realTimer) between(start time.Time, end time.Time) time.Duration { - return end.Sub(start) -} diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_value_calculators.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_value_calculators.go deleted file mode 100644 index 
9bc3102a9..000000000
--- a/services/templeton/vendor/src/github.com/bitly/go-hostpool/epsilon_value_calculators.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package hostpool
-
-// --- Value Calculators -----------------
-
-import (
-	"math"
-)
-
-// --- Definitions -----------------------
-
-// Structs implementing this interface are used to convert the average response time for a host
-// into a score that can be used to weight hosts in the epsilon greedy hostpool. Lower response
-// times should yield higher scores (we want to select the faster hosts more often). The default
-// LinearEpsilonValueCalculator just uses the reciprocal of the response time. In practice, any
-// decreasing function from the positive reals to the positive reals should work.
-type EpsilonValueCalculator interface {
-	CalcValueFromAvgResponseTime(float64) float64
-}
-
-type LinearEpsilonValueCalculator struct{}
-type LogEpsilonValueCalculator struct{ LinearEpsilonValueCalculator }
-type PolynomialEpsilonValueCalculator struct {
-	LinearEpsilonValueCalculator
-	Exp float64 // the exponent to which we will raise the value to reweight
-}
-
-// -------- Methods -----------------------
-
-func (c *LinearEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
-	return 1.0 / v
-}
-
-func (c *LogEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
-	// we need to add 1 to v so that this will be defined on all positive floats
-	return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Log(v + 1.0))
-}
-
-func (c *PolynomialEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
-	return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Pow(v, c.Exp))
-}
diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/example_test.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/example_test.go
deleted file mode 100644
index 88d0e558c..000000000
--- a/services/templeton/vendor/src/github.com/bitly/go-hostpool/example_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package hostpool_test
-
-import (
-	"github.com/bitly/go-hostpool"
-)
-
-func ExampleNewEpsilonGreedy() {
-	hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{})
-	hostResponse := hp.Get()
-	hostname := hostResponse.Host()
-	var err error // (make a request with hostname)
-	hostResponse.Mark(err)
-}
diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/host_entry.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/host_entry.go
deleted file mode 100644
index dcec9a0b7..000000000
--- a/services/templeton/vendor/src/github.com/bitly/go-hostpool/host_entry.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package hostpool
-
-import (
-	"time"
-)
-
-// --- hostEntry - this is due to get upgraded
-
-type hostEntry struct {
-	host              string
-	nextRetry         time.Time
-	retryCount        int16
-	retryDelay        time.Duration
-	dead              bool
-	epsilonCounts     []int64
-	epsilonValues     []int64
-	epsilonIndex      int
-	epsilonValue      float64
-	epsilonPercentage float64
-}
-
-func (h *hostEntry) canTryHost(now time.Time) bool {
-	if !h.dead {
-		return true
-	}
-	if h.nextRetry.Before(now) {
-		return true
-	}
-	return false
-}
-
-func (h *hostEntry) willRetryHost(maxRetryInterval time.Duration) {
-	h.retryCount += 1
-	newDelay := h.retryDelay * 2
-	if newDelay < maxRetryInterval {
-		h.retryDelay = newDelay
-	} else {
-		h.retryDelay = maxRetryInterval
-	}
-	h.nextRetry = time.Now().Add(h.retryDelay)
-}
-
-func (h *hostEntry) getWeightedAverageResponseTime() float64 {
-	
var value float64 - var lastValue float64 - - // start at 1 so we start with the oldest entry - for i := 1; i <= epsilonBuckets; i += 1 { - pos := (h.epsilonIndex + i) % epsilonBuckets - bucketCount := h.epsilonCounts[pos] - // Changing the line below to what I think it should be to get the weights right - weight := float64(i) / float64(epsilonBuckets) - if bucketCount > 0 { - currentValue := float64(h.epsilonValues[pos]) / float64(bucketCount) - value += currentValue * weight - lastValue = currentValue - } else { - value += lastValue * weight - } - } - return value -} diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool.go deleted file mode 100644 index d65cb2dee..000000000 --- a/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool.go +++ /dev/null @@ -1,201 +0,0 @@ -// A Go package to intelligently and flexibly pool among multiple hosts from your Go application. -// Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are -// avoided. A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132 -package hostpool - -import ( - "log" - "sync" - "time" -) - -// Returns current version -func Version() string { - return "0.1" -} - -// --- Response interfaces and structs ---- - -// This interface represents the response from HostPool. You can retrieve the -// hostname by calling Host(), and after making a request to the host you should -// call Mark with any error encountered, which will inform the HostPool issuing -// the HostPoolResponse of what happened to the request and allow it to update. -type HostPoolResponse interface { - Host() string - Mark(error) - hostPool() HostPool -} - -type standardHostPoolResponse struct { - host string - sync.Once - pool HostPool -} - -// --- HostPool structs and interfaces ---- - -// This is the main HostPool interface. Structs implementing this interface -// allow you to Get a HostPoolResponse (which includes a hostname to use), -// get the list of all Hosts, and use ResetAll to reset state. -type HostPool interface { - Get() HostPoolResponse - // keep the marks separate so we can override independently - markSuccess(HostPoolResponse) - markFailed(HostPoolResponse) - - ResetAll() - Hosts() []string - - // Close the hostpool and release all resources. 
- Close() -} - -type standardHostPool struct { - sync.RWMutex - hosts map[string]*hostEntry - hostList []*hostEntry - initialRetryDelay time.Duration - maxRetryInterval time.Duration - nextHostIndex int -} - -// ------ constants ------------------- - -const epsilonBuckets = 120 -const epsilonDecay = 0.90 // decay the exploration rate -const minEpsilon = 0.01 // explore one percent of the time -const initialEpsilon = 0.3 -const defaultDecayDuration = time.Duration(5) * time.Minute - -// Construct a basic HostPool using the hostnames provided -func New(hosts []string) HostPool { - p := &standardHostPool{ - hosts: make(map[string]*hostEntry, len(hosts)), - hostList: make([]*hostEntry, len(hosts)), - initialRetryDelay: time.Duration(30) * time.Second, - maxRetryInterval: time.Duration(900) * time.Second, - } - - for i, h := range hosts { - e := &hostEntry{ - host: h, - retryDelay: p.initialRetryDelay, - } - p.hosts[h] = e - p.hostList[i] = e - } - - return p -} - -func (r *standardHostPoolResponse) Host() string { - return r.host -} - -func (r *standardHostPoolResponse) hostPool() HostPool { - return r.pool -} - -func (r *standardHostPoolResponse) Mark(err error) { - r.Do(func() { - doMark(err, r) - }) -} - -func doMark(err error, r HostPoolResponse) { - if err == nil { - r.hostPool().markSuccess(r) - } else { - r.hostPool().markFailed(r) - } -} - -// return an entry from the HostPool -func (p *standardHostPool) Get() HostPoolResponse { - p.Lock() - defer p.Unlock() - host := p.getRoundRobin() - return &standardHostPoolResponse{host: host, pool: p} -} - -func (p *standardHostPool) getRoundRobin() string { - now := time.Now() - hostCount := len(p.hostList) - for i := range p.hostList { - // iterate via sequenece from where we last iterated - currentIndex := (i + p.nextHostIndex) % hostCount - - h := p.hostList[currentIndex] - if !h.dead { - p.nextHostIndex = currentIndex + 1 - return h.host - } - if h.nextRetry.Before(now) { - h.willRetryHost(p.maxRetryInterval) - p.nextHostIndex = currentIndex + 1 - return h.host - } - } - - // all hosts are down. 
re-add them - p.doResetAll() - p.nextHostIndex = 0 - return p.hostList[0].host -} - -func (p *standardHostPool) ResetAll() { - p.Lock() - defer p.Unlock() - p.doResetAll() -} - -// this actually performs the logic to reset, -// and should only be called when the lock has -// already been acquired -func (p *standardHostPool) doResetAll() { - for _, h := range p.hosts { - h.dead = false - } -} - -func (p *standardHostPool) Close() { - for _, h := range p.hosts { - h.dead = true - } -} - -func (p *standardHostPool) markSuccess(hostR HostPoolResponse) { - host := hostR.Host() - p.Lock() - defer p.Unlock() - - h, ok := p.hosts[host] - if !ok { - log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) - } - h.dead = false -} - -func (p *standardHostPool) markFailed(hostR HostPoolResponse) { - host := hostR.Host() - p.Lock() - defer p.Unlock() - h, ok := p.hosts[host] - if !ok { - log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) - } - if !h.dead { - h.dead = true - h.retryCount = 0 - h.retryDelay = p.initialRetryDelay - h.nextRetry = time.Now().Add(h.retryDelay) - } - -} -func (p *standardHostPool) Hosts() []string { - hosts := make([]string, 0, len(p.hosts)) - for host := range p.hosts { - hosts = append(hosts, host) - } - return hosts -} diff --git a/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool_test.go b/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool_test.go deleted file mode 100644 index e974aa74c..000000000 --- a/services/templeton/vendor/src/github.com/bitly/go-hostpool/hostpool_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package hostpool - -import ( - "errors" - "github.com/bmizerany/assert" - "io/ioutil" - "log" - "math/rand" - "os" - "testing" - "time" -) - -func TestHostPool(t *testing.T) { - log.SetOutput(ioutil.Discard) - defer log.SetOutput(os.Stdout) - - dummyErr := errors.New("Dummy Error") - - p := New([]string{"a", "b", "c"}) - assert.Equal(t, p.Get().Host(), "a") - assert.Equal(t, p.Get().Host(), "b") - assert.Equal(t, p.Get().Host(), "c") - respA := p.Get() - assert.Equal(t, respA.Host(), "a") - - respA.Mark(dummyErr) - respB := p.Get() - respB.Mark(dummyErr) - respC := p.Get() - assert.Equal(t, respC.Host(), "c") - respC.Mark(nil) - // get again, and verify that it's still c - assert.Equal(t, p.Get().Host(), "c") - // now try to mark b as success; should fail because already marked - respB.Mark(nil) - assert.Equal(t, p.Get().Host(), "c") // would be b if it were not dead - // now restore a - respA = &standardHostPoolResponse{host: "a", pool: p} - respA.Mark(nil) - assert.Equal(t, p.Get().Host(), "a") - assert.Equal(t, p.Get().Host(), "c") - - // ensure that we get *something* back when all hosts fail - for _, host := range []string{"a", "b", "c"} { - response := &standardHostPoolResponse{host: host, pool: p} - response.Mark(dummyErr) - } - resp := p.Get() - assert.NotEqual(t, resp, nil) -} - -type mockTimer struct { - t int // the time it will always return -} - -func (t *mockTimer) between(start time.Time, end time.Time) time.Duration { - return time.Duration(t.t) * time.Millisecond -} - -func TestEpsilonGreedy(t *testing.T) { - log.SetOutput(ioutil.Discard) - defer log.SetOutput(os.Stdout) - - rand.Seed(10) - - iterations := 12000 - p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool) - - timings := make(map[string]int64) - timings["a"] = 200 - timings["b"] = 300 - - hitCounts := make(map[string]int) - hitCounts["a"] = 0 - hitCounts["b"] = 0 - - log.Printf("starting first run 
(a, b)") - - for i := 0; i < iterations; i += 1 { - if i != 0 && i%100 == 0 { - p.performEpsilonGreedyDecay() - } - hostR := p.Get() - host := hostR.Host() - hitCounts[host]++ - timing := timings[host] - p.timer = &mockTimer{t: int(timing)} - hostR.Mark(nil) - } - - for host := range hitCounts { - log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0) - } - - assert.Equal(t, hitCounts["a"] > hitCounts["b"], true) - - hitCounts["a"] = 0 - hitCounts["b"] = 0 - log.Printf("starting second run (b, a)") - timings["a"] = 500 - timings["b"] = 100 - - for i := 0; i < iterations; i += 1 { - if i != 0 && i%100 == 0 { - p.performEpsilonGreedyDecay() - } - hostR := p.Get() - host := hostR.Host() - hitCounts[host]++ - timing := timings[host] - p.timer = &mockTimer{t: int(timing)} - hostR.Mark(nil) - } - - for host := range hitCounts { - log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0) - } - - assert.Equal(t, hitCounts["b"] > hitCounts["a"], true) -} - -func BenchmarkEpsilonGreedy(b *testing.B) { - b.StopTimer() - - // Make up some response times - zipfDist := rand.NewZipf(rand.New(rand.NewSource(0)), 1.1, 5, 5000) - timings := make([]uint64, b.N) - for i := 0; i < b.N; i++ { - timings[i] = zipfDist.Uint64() - } - - // Make the hostpool with a few hosts - p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool) - - b.StartTimer() - for i := 0; i < b.N; i++ { - if i != 0 && i%100 == 0 { - p.performEpsilonGreedyDecay() - } - hostR := p.Get() - p.timer = &mockTimer{t: int(timings[i])} - hostR.Mark(nil) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/CHANGELOG-3.0.md b/services/templeton/vendor/src/github.com/olivere/elastic/CHANGELOG-3.0.md deleted file mode 100644 index 07f3e66bf..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/CHANGELOG-3.0.md +++ /dev/null @@ -1,363 +0,0 @@ -# Elastic 3.0 - -Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes. - -We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft. - -So, to summarize: - -1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained. -2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch. - -The rest of the document is a list of all changes in Elastic 3.0. - -## Pointer types - -All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example: - -Example for Elastic 2.0 (old): - -```go -q := elastic.NewMatchAllQuery() -res, err := elastic.Search("one").Query(&q).Do() // notice the & here -``` - -Example for Elastic 3.0 (new): - -```go -q := elastic.NewMatchAllQuery() -res, err := elastic.Search("one").Query(q).Do() // no more & -// ... which can be simplified as: -res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do() -``` - -It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046). 
- -## Query/filter merge - -One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`). - -The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay! - -Notice that some methods still come by "filter", e.g. `PostFilter`. However, they accept a `Query` now when they used to accept a `Filter` before. - -Example for Elastic 2.0 (old): - -```go -q := elastic.NewMatchAllQuery() -f := elastic.NewTermFilter("tag", "important") -res, err := elastic.Search().Index("one").Query(&q).PostFilter(f) -``` - -Example for Elastic 3.0 (new): - -```go -q := elastic.NewMatchAllQuery() -f := elastic.NewTermQuery("tag", "important") // it's a query now! -res, err := elastic.Search().Index("one").Query(q).PostFilter(f) -``` - -## Facets are removed - -[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now. - -## Errors - -Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer. - -Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59). - -### HTTP Status 404 (Not Found) - -When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0. - -Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error. - -To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below). - -The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0. - -Example for Elastic 2.0 (old): - -```go -res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() -if err != nil { - // Something else went wrong (but 404 is NOT an error in Elastic 2.0) -} -if !res.Found { - // Document has not been found -} -``` - -Example for Elastic 3.0 (new): - -```go -res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() -if err != nil { - if elastic.IsNotFound(err) { - // Document has not been found - } else { - // Something else went wrong - } -} -``` - -### HTTP Status 408 (Timeouts) - -Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API. 
- -To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper. - -Example for Elastic 2.0 (old): - -```go -health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do() -if err != nil { - // ... -} -if health.TimedOut { - // We have a timeout -} -``` - -Example for Elastic 3.0 (new): - -```go -health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do() -if elastic.IsTimeout(err) { - // We have a timeout -} -``` - -### Bulk Errors - -The error response of a bulk operation used to be a simple string in Elasticsearch 1.x. -In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error. -These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206). - -### Removed specific Elastic errors - -The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message. - -## Numeric types - -Elastic 3.0 has settled to use `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`. - -## Pluralization - -Some services accept zero, one or more indices or types to operate on. -E.g. in the `SearchService` accepts a list of zero, one, or more indices to -search and therefor had a func called `Index(index string)` and a func -called `Indices(indices ...string)`. - -Elastic 3.0 now only uses the singular form that, when applicable, accepts a -variadic type. E.g. in the case of the `SearchService`, you now only have -one func with the following signature: `Index(indices ...string)`. - -Notice this is only limited to `Index(...)` and `Type(...)`. There are other -services with variadic functions. These have not been changed. - -## Multiple calls to variadic functions - -Some services with variadic functions have cleared the underlying slice when -called while other services just add to the existing slice. This has now been -normalized to always add to the underlying slice. - -Example for Elastic 2.0 (old): - -```go -// Would only cleared scroll id "two" -// because ScrollId cleared the values when called multiple times -client.ClearScroll().ScrollId("one").ScrollId("two").Do() -``` - -Example for Elastic 3.0 (new): - -```go -// Now (correctly) clears both scroll id "one" and "two" -// because ScrollId no longer clears the values when called multiple times -client.ClearScroll().ScrollId("one").ScrollId("two").Do() -``` - -## Ping service requires URL - -The `Ping` service raised some issues because it is different from all -other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`. - -Users expected to ping the cluster, but that is not possible as the cluster -can be a set of many nodes: So which node do we ping then? - -To make it more clear, the `Ping` function on the client now requires users -to explicitly set the URL of the node to ping. - -## Meta fields - -Many of the meta fields e.g. 
`_parent` or `_routing` are now -[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields) -and are no longer returned as parts of the `fields` object. We had to change -larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0. - -Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default). - -## HasParentQuery / HasChildQuery - -`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API. - -Example for Elastic 2.0 (old): - -```go -allQ := elastic.NewMatchAllQuery() -q := elastic.NewHasChildFilter("tweet").Query(&allQ) -``` - -Example for Elastic 3.0 (new): - -```go -q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery()) -``` - -## SetBasicAuth client option - -You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html). - -Example: - -```go -client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret")) -if err != nil { - log.Fatal(err) -} -``` - -## Delete-by-Query API - -The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html). - -Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404. - -An older version of this document stated the following: - -> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed. -> -> Example for Elastic 3.0 (new): -> -> ```go -> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do() -> if err == elastic.ErrPluginNotFound { -> // Delete By Query API is not available -> } -> ``` - -I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch. - -If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play. - -## HasPlugin and SetRequiredPlugins - -Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html). - -You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client. 
-
-Example for Elastic 3.0 (new):
-
-```go
-found, err := client.HasPlugin("delete-by-query")
-if err == nil && found {
-	// ... Delete By Query API is available
-}
-```
-
-To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client wouldn't be created in the first place.
-
-```go
-// Will raise an error if the "delete-by-query" plugin is NOT installed
-client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query"))
-if err != nil {
-	log.Fatal(err)
-}
-```
-
-Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file.
-
-## Common Query has been renamed to Common Terms Query
-
-The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring).
-
-## Remove `MoreLikeThis` and `MoreLikeThisField`
-
-The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`.
-
-## Remove Filtered Query
-
-With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite much of your application code, it is a good chance to get rid of things that are deprecated as well. So you might simply replace your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated).
-
-## Remove FuzzyLikeThis and FuzzyLikeThisField
-
-Both have been removed from Elasticsearch 2.0 as well.
-
-## Remove LimitFilter
-
-The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
-
-## Remove `_cache` and `_cache_key` from filters
-
-Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
-
-## Partial fields are gone
-
-Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
-
-## Scripting
-
-A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type.
-
-Example for Elastic 2.0 (old):
-
-```go
-update, err := client.Update().Index("twitter").Type("tweet").Id("1").
-	Script("ctx._source.retweets += num").
-	ScriptParams(map[string]interface{}{"num": 1}).
-	Upsert(map[string]interface{}{"retweets": 0}).
-	Do()
-```
-
-Example for Elastic 3.0 (new):
-
-```go
-update, err := client.Update().Index("twitter").Type("tweet").Id("1").
-	Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
-	Upsert(map[string]interface{}{"retweets": 0}).
-	Do()
-```
-
-## Cluster State
-
-The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`.
-
-## Unexported structs in response
-
-Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0 however, we changed this so that (most) sub-structs are now unexported, meaning: You can only pass around the whole response, not sub-structures of it. This makes it easier to restructure responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
-
-## Add offset to Histogram aggregation
-
-Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
-
-## Services
-
-### REST API specification
-
-As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
-
-Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
-
-This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
-
-At the same time, the file names of the services are renamed to match the REST API specification naming.
-
-### REST API Test Suite
-
-The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
-
-This process is not completed though.
-
-
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTING.md b/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTING.md
deleted file mode 100644
index 4fbc79dd0..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTING.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# How to contribute
-
-Elastic is an open-source project and we look forward to every
-contribution.
-
-Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
-overview of the features of Elasticsearch. However, Elastic tries to resemble
-the Java API of Elasticsearch which you can find [on GitHub](https://github.com/elastic/elasticsearch).
-
-This explains why you might think that some options are strange or missing
-in Elastic, while often they're just different. Please check the Java API first.
-
-Having said that: Elasticsearch is moving fast and it might be very likely
-that we missed some features or changes. Feel free to change that.
-
-## Your Pull Request
-
-To make it easy to review and understand your changes, please keep the
-following things in mind before submitting your pull request:
-
-* Have you compared the existing implementation with the Java API?
-* Please work on the latest possible state of `olivere/elastic`.
-  Use `release-branch.v2` for targeting Elasticsearch 1.x and
-  `release-branch.v3` for targeting 2.x.
-* Create a branch dedicated to your change.
-* If possible, write a test case which confirms your change.
-* Make sure your changes and your tests work with all recent versions of
-  Elasticsearch. We currently support Elasticsearch 1.7.x in the
-  release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
-* Test your changes before creating a pull request (`go test ./...`).
-* Don't mix several features or bug fixes in one pull request.
-* Create a meaningful commit message.
-* Explain your change, e.g. provide a link to the issue you are fixing and
-  probably a link to the Elasticsearch documentation and/or source code.
-* Format your source with `go fmt`.
-
-## Additional Resources
-
-* [GitHub documentation](http://help.github.com/)
-* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTORS b/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTORS
deleted file mode 100644
index 0743d2d15..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/CONTRIBUTORS
+++ /dev/null
@@ -1,35 +0,0 @@
-# This is a list of people who have contributed code
-# to the Elastic repository.
-#
-# It is just my small "thank you" to all those that helped
-# making Elastic what it is.
-#
-# Please keep this list sorted.
-
-Adam Alix [@adamalix](https://github.com/adamalix)
-Adam Weiner [@adamweiner](https://github.com/adamweiner)
-Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
-Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
-Christophe Courtaut [@kri5](https://github.com/kri5)
-Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
-Corey Scott [@corsc](https://github.com/corsc)
-Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
-Gerhard Häring [@ghaering](https://github.com/ghaering)
-Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
-Guillaume J. 
Charmes [@creack](https://github.com/creack) -Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy) -Isaac Saldana [@isaldana](https://github.com/isaldana) -Jack Lindamood [@cep21](https://github.com/cep21) -John Goodall [@jgoodall](https://github.com/jgoodall) -Junpei Tsuji [@jun06t](https://github.com/jun06t) -Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh) -Mara Kim [@autochthe](https://github.com/autochthe) -Medhi Bechina [@mdzor](https://github.com/mdzor) -Nicholas Wolff [@nwolff](https://github.com/nwolff) -Orne Brocaar [@brocaar](https://github.com/brocaar) -Sacheendra talluri [@sacheendra](https://github.com/sacheendra) -Sean DuBois [@Sean-Der](https://github.com/Sean-Der) -Shalin LK [@shalinlk](https://github.com/shalinlk) -Sundar [@sundarv85](https://github.com/sundarv85) -Tetsuya Morimoto [@t2y](https://github.com/t2y) -zakthomas [@zakthomas](https://github.com/zakthomas) diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/LICENSE b/services/templeton/vendor/src/github.com/olivere/elastic/LICENSE deleted file mode 100644 index 8b22cdb60..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) -Copyright © 2012-2015 Oliver Eilhard - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the “Software”), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/README.md b/services/templeton/vendor/src/github.com/olivere/elastic/README.md deleted file mode 100644 index eefd530df..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/README.md +++ /dev/null @@ -1,415 +0,0 @@ -# Elastic - -Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the -[Go](http://www.golang.org/) programming language. - -[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v3)](https://travis-ci.org/olivere/elastic) -[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v3) -[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE) - -See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic. - - -## Releases - -**The release branches (e.g. [`release-branch.v3`](https://github.com/olivere/elastic/tree/release-branch.v3)) are actively being worked on and can break at any time. 
If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
-
-Here's the version matrix:
-
-Elasticsearch version | Elastic version  | Package URL
-----------------------|------------------|------------
-2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
-1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
-0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
-
-**Example:**
-
-You have installed Elasticsearch 2.1.1 and want to use Elastic. As listed above, you should use Elastic 3.0. So you first install the stable release of Elastic 3.0 from gopkg.in.
-
-```sh
-$ go get gopkg.in/olivere/elastic.v3
-```
-
-You then import it with this import path:
-
-```go
-import "gopkg.in/olivere/elastic.v3"
-```
-
-### Elastic 3.0
-
-Elastic 3.0 targets Elasticsearch 2.0 and later. Elasticsearch 2.0.0 was [released on 28th October 2015](https://www.elastic.co/blog/elasticsearch-2-0-0-released).
-
-Notice that there are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html) and we used this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md).
-
-### Elastic 2.0
-
-Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
-
-### Elastic 1.0
-
-Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
-to a recent version.
-
-However, if you cannot update for some reason, don't worry. Version 1.0 is
-still available. All you need to do is go-get it and change your import path
-as described above.
-
-
-## Status
-
-We have been using Elastic in production since 2012. Elastic is stable but the API changes
-now and then. We strive for API compatibility.
-However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
-and we sometimes have to adapt.
-
-Having said that, there have been no big API changes that required you
-to rewrite your application big time. More often than not it's renaming APIs
-and adding/removing features so that Elastic is in sync with Elasticsearch.
-
-Elastic has been used in production with the following Elasticsearch versions:
-0.90, 1.0-1.7. Furthermore, we use [Travis CI](https://travis-ci.org/)
-to test Elastic with the most recent versions of Elasticsearch and Go.
-See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
-file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
-for the results.
-
-Elasticsearch has quite a few features. Most of them are implemented
-by Elastic. I add features and APIs as required. It's straightforward
-to implement missing pieces. I'm accepting pull requests :-)
-
-Having said that, I hope you find the project useful.
-
-
-## Getting Started
-
-The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go). 
The client connects to Elasticsearch on `http://127.0.0.1:9200` by default. - -You typically create one client for your app. Here's a complete example of -creating a client, creating an index, adding a document, executing a search etc. - -```go -// Create a client -client, err := elastic.NewClient() -if err != nil { - // Handle error -} - -// Create an index -_, err = client.CreateIndex("twitter").Do() -if err != nil { - // Handle error - panic(err) -} - -// Add a document to the index -tweet := Tweet{User: "olivere", Message: "Take Five"} -_, err = client.Index(). - Index("twitter"). - Type("tweet"). - Id("1"). - BodyJson(tweet). - Do() -if err != nil { - // Handle error - panic(err) -} - -// Search with a term query -termQuery := elastic.NewTermQuery("user", "olivere") -searchResult, err := client.Search(). - Index("twitter"). // search in index "twitter" - Query(termQuery). // specify the query - Sort("user", true). // sort by "user" field, ascending - From(0).Size(10). // take documents 0-9 - Pretty(true). // pretty print request and response JSON - Do() // execute -if err != nil { - // Handle error - panic(err) -} - -// searchResult is of type SearchResult and returns hits, suggestions, -// and all kinds of other information from Elasticsearch. -fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) - -// Each is a convenience function that iterates over hits in a search result. -// It makes sure you don't need to check for nil values in the response. -// However, it ignores errors in serialization. If you want full control -// over iterating the hits, see below. -var ttyp Tweet -for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { - if t, ok := item.(Tweet); ok { - fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) - } -} -// TotalHits is another convenience function that works even when something goes wrong. -fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) - -// Here's how you iterate through results with full control over each step. -if searchResult.Hits != nil { - fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) - - // Iterate through results - for _, hit := range searchResult.Hits.Hits { - // hit.Index contains the name of the index - - // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). - var t Tweet - err := json.Unmarshal(*hit.Source, &t) - if err != nil { - // Deserialization failed - } - - // Work with tweet - fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) - } -} else { - // No hits - fmt.Print("Found no tweets\n") -} - -// Delete the index again -_, err = client.DeleteIndex("twitter").Do() -if err != nil { - // Handle error - panic(err) -} -``` - -See the [wiki](https://github.com/olivere/elastic/wiki) for more details. 
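One detail worth restating from CHANGELOG-3.0.md earlier in this patch: a missing document now surfaces as an error rather than a zero-value response, so lookups typically pair `Get` with the `IsNotFound` helper. A minimal sketch along the lines of the walkthrough above (the index, type, and id are placeholders):

```go
// Fetch a document that may not exist; with Elastic 3.0 a 404 is an error.
res, err := client.Get().Index("twitter").Type("tweet").Id("42").Do()
if err != nil {
	if elastic.IsNotFound(err) {
		// Document has not been found
	} else {
		// Something else went wrong
		panic(err)
	}
}
if res != nil && res.Found {
	// Work with the document in res.Source
}
```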
- - -## API Status - -### Document APIs - -- [x] Index API -- [x] Get API -- [x] Delete API -- [x] Update API -- [x] Multi Get API -- [x] Bulk API -- [x] Delete By Query API -- [x] Term Vectors -- [ ] Multi termvectors API - -### Search APIs - -- [x] Search -- [x] Search Template -- [ ] Search Shards API -- [x] Suggesters - - [x] Term Suggester - - [x] Phrase Suggester - - [x] Completion Suggester - - [x] Context Suggester -- [x] Multi Search API -- [x] Count API -- [ ] Search Exists API -- [ ] Validate API -- [x] Explain API -- [x] Percolator API -- [ ] Field Stats API - -### Aggregations - -- Metrics Aggregations - - [x] Avg - - [x] Cardinality - - [x] Extended Stats - - [x] Geo Bounds - - [x] Max - - [x] Min - - [x] Percentiles - - [x] Percentile Ranks - - [ ] Scripted Metric - - [x] Stats - - [x] Sum - - [x] Top Hits - - [x] Value Count -- Bucket Aggregations - - [x] Children - - [x] Date Histogram - - [x] Date Range - - [x] Filter - - [x] Filters - - [x] Geo Distance - - [ ] GeoHash Grid - - [x] Global - - [x] Histogram - - [x] IPv4 Range - - [x] Missing - - [x] Nested - - [x] Range - - [x] Reverse Nested - - [x] Sampler - - [x] Significant Terms - - [x] Terms -- Pipeline Aggregations - - [x] Avg Bucket - - [x] Derivative - - [x] Max Bucket - - [x] Min Bucket - - [x] Sum Bucket - - [x] Moving Average - - [x] Cumulative Sum - - [x] Bucket Script - - [x] Bucket Selector - - [x] Serial Differencing -- [x] Aggregation Metadata - -### Indices APIs - -- [x] Create Index -- [x] Delete Index -- [x] Get Index -- [x] Indices Exists -- [x] Open / Close Index -- [x] Put Mapping -- [x] Get Mapping -- [ ] Get Field Mapping -- [ ] Types Exists -- [x] Index Aliases -- [x] Update Indices Settings -- [x] Get Settings -- [ ] Analyze -- [x] Index Templates -- [x] Warmers -- [x] Indices Stats -- [ ] Indices Segments -- [ ] Indices Recovery -- [ ] Clear Cache -- [x] Flush -- [x] Refresh -- [x] Optimize -- [ ] Shadow Replica Indices -- [ ] Upgrade - -### cat APIs - -The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line. 
-
-- [ ] cat aliases
-- [ ] cat allocation
-- [ ] cat count
-- [ ] cat fielddata
-- [ ] cat health
-- [ ] cat indices
-- [ ] cat master
-- [ ] cat nodes
-- [ ] cat pending tasks
-- [ ] cat plugins
-- [ ] cat recovery
-- [ ] cat thread pool
-- [ ] cat shards
-- [ ] cat segments
-
-### Cluster APIs
-
-- [x] Cluster Health
-- [x] Cluster State
-- [x] Cluster Stats
-- [ ] Pending Cluster Tasks
-- [ ] Cluster Reroute
-- [ ] Cluster Update Settings
-- [ ] Nodes Stats
-- [x] Nodes Info
-- [ ] Nodes hot_threads
-
-### Query DSL
-
-- [x] Match All Query
-- [x] Inner hits
-- Full text queries
-  - [x] Match Query
-  - [x] Multi Match Query
-  - [x] Common Terms Query
-  - [x] Query String Query
-  - [x] Simple Query String Query
-- Term level queries
-  - [x] Term Query
-  - [x] Terms Query
-  - [x] Range Query
-  - [x] Exists Query
-  - [x] Missing Query
-  - [x] Prefix Query
-  - [x] Wildcard Query
-  - [x] Regexp Query
-  - [x] Fuzzy Query
-  - [x] Type Query
-  - [x] Ids Query
-- Compound queries
-  - [x] Constant Score Query
-  - [x] Bool Query
-  - [x] Dis Max Query
-  - [x] Function Score Query
-  - [x] Boosting Query
-  - [x] Indices Query
-  - [x] And Query (deprecated)
-  - [x] Not Query
-  - [x] Or Query (deprecated)
-  - [ ] Filtered Query (deprecated)
-  - [ ] Limit Query (deprecated)
-- Joining queries
-  - [x] Nested Query
-  - [x] Has Child Query
-  - [x] Has Parent Query
-- Geo queries
-  - [ ] GeoShape Query
-  - [x] Geo Bounding Box Query
-  - [x] Geo Distance Query
-  - [ ] Geo Distance Range Query
-  - [x] Geo Polygon Query
-  - [ ] Geohash Cell Query
-- Specialized queries
-  - [x] More Like This Query
-  - [x] Template Query
-  - [x] Script Query
-- Span queries
-  - [ ] Span Term Query
-  - [ ] Span Multi Term Query
-  - [ ] Span First Query
-  - [ ] Span Near Query
-  - [ ] Span Or Query
-  - [ ] Span Not Query
-  - [ ] Span Containing Query
-  - [ ] Span Within Query
-
-### Modules
-
-- [ ] Snapshot and Restore
-
-### Sorting
-
-- [x] Sort by score
-- [x] Sort by field
-- [x] Sort by geo distance
-- [x] Sort by script
-
-### Scan
-
-Scrolling through documents (e.g. `search_type=scan`) is implemented via
-the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
-
-
-## How to contribute
-
-Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
-
-## Credits
-
-Thanks a lot to the great folks working hard on
-[Elasticsearch](http://www.elasticsearch.org/)
-and
-[Go](http://www.golang.org/).
-
-Elastic uses portions of the
-[uritemplates](https://github.com/jtacoma/uritemplates) library
-by Joshua Tacoma and
-[backoff](https://github.com/cenkalti/backoff) by Cenk Altı.
-
-## LICENSE
-
-MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
-or the LICENSE file provided in the repository for details.
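Tying the Scan note above to the `ClearScroll` behavior described in CHANGELOG-3.0.md earlier in this patch, a minimal cleanup sketch (the scroll ids are placeholders) might look like the following; both ids are cleared because, as of Elastic 3.0, repeated `ScrollId` calls add to the underlying slice rather than replacing it:

```go
// Release server-side scroll contexts once iteration is finished.
_, err := client.ClearScroll().ScrollId("one").ScrollId("two").Do()
if err != nil {
	// Handle error
}
```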
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/LICENSE b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/LICENSE
deleted file mode 100644
index f6f2dcc97..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-Portions of this code rely on this LICENSE:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Cenk Altı
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff.go b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff.go
deleted file mode 100644
index f6d7ad9a0..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package backoff
-
-import (
-	"math"
-	"math/rand"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// Backoff is an interface for different types of backoff algorithms.
-type Backoff interface {
-	Next() time.Duration
-	Reset()
-}
-
-// Stop is used as a signal to indicate that no more retries should be made.
-const Stop time.Duration = -1
-
-// -- Simple Backoff --
-
-// SimpleBackoff takes a list of fixed values for backoff intervals.
-// Each call to Next returns the next value from that fixed list.
-// After each value is returned, subsequent calls to Next will only return
-// the last element. The caller may specify if the values are "jittered".
-type SimpleBackoff struct {
-	sync.Mutex
-	ticks  []int
-	index  int
-	jitter bool
-	stop   bool
-}
-
-// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
-// list of fixed intervals in milliseconds.
-func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
-	return &SimpleBackoff{
-		ticks:  ticks,
-		index:  0,
-		jitter: false,
-		stop:   false,
-	}
-}
-
-// Jitter, when set, randomizes each returned value to the range [0.5*value .. 1.5*value].
-func (b *SimpleBackoff) Jitter(doJitter bool) *SimpleBackoff {
-	b.Lock()
-	defer b.Unlock()
-	b.jitter = doJitter
-	return b
-}
-
-// SendStop, when enabled, makes Next return Stop once
-// the list of values is exhausted.
-func (b *SimpleBackoff) SendStop(doStop bool) *SimpleBackoff {
-	b.Lock()
-	defer b.Unlock()
-	b.stop = doStop
-	return b
-}
-
-// Next returns the next wait interval.
-func (b *SimpleBackoff) Next() time.Duration {
-	b.Lock()
-	defer b.Unlock()
-
-	i := b.index
-	if i >= len(b.ticks) {
-		if b.stop {
-			return Stop
-		}
-		i = len(b.ticks) - 1
-		b.index = i
-	} else {
-		b.index++
-	}
-
-	ms := b.ticks[i]
-	if b.jitter {
-		ms = jitter(ms)
-	}
-	return time.Duration(ms) * time.Millisecond
-}
-
-// Reset resets SimpleBackoff.
-func (b *SimpleBackoff) Reset() {
-	b.Lock()
-	b.index = 0
-	b.Unlock()
-}
-
-// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
-func jitter(millis int) int {
-	if millis <= 0 {
-		return 0
-	}
-	return millis/2 + rand.Intn(millis)
-}
-
-// -- Exponential --
-
-// ExponentialBackoff implements the simple exponential backoff described by
-// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
-type ExponentialBackoff struct {
-	sync.Mutex
-	t    float64 // initial timeout (in msec)
-	f    float64 // exponential factor (e.g. 2)
-	m    float64 // maximum timeout (in msec)
-	n    int64   // number of retries
-	stop bool    // indicates whether Next should send "Stop" when max timeout is reached
-}
-
-// NewExponentialBackoff returns an ExponentialBackoff backoff policy.
-// Use initialTimeout to set the first/minimal interval
-// and maxTimeout to set the maximum wait interval.
-func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
-	return &ExponentialBackoff{
-		t:    float64(int64(initialTimeout / time.Millisecond)),
-		f:    2.0,
-		m:    float64(int64(maxTimeout / time.Millisecond)),
-		n:    0,
-		stop: false,
-	}
-}
-
-// SendStop, when enabled, makes Next return Stop once
-// the maximum timeout is reached.
-func (b *ExponentialBackoff) SendStop(doStop bool) *ExponentialBackoff {
-	b.Lock()
-	defer b.Unlock()
-	b.stop = doStop
-	return b
-}
-
-// Next returns the next wait interval.
-func (t *ExponentialBackoff) Next() time.Duration {
-	t.Lock()
-	defer t.Unlock()
-
-	n := float64(atomic.AddInt64(&t.n, 1))
-	r := 1.0 + rand.Float64() // random number in [1..2]
-	m := math.Min(r*t.t*math.Pow(t.f, n), t.m)
-	if t.stop && m >= t.m {
-		return Stop
-	}
-	d := time.Duration(int64(m)) * time.Millisecond
-	return d
-}
-
-// Reset resets the backoff policy so that it can be reused.
-func (t *ExponentialBackoff) Reset() {
-	t.Lock()
-	t.n = 0
-	t.Unlock()
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff_test.go
deleted file mode 100644
index 9b5bcf0e1..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/backoff_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
- -package backoff - -import ( - "math/rand" - "testing" - "time" -) - -func TestSimpleBackoff(t *testing.T) { - b := NewSimpleBackoff(1, 2, 7) - - if got, want := b.Next(), time.Duration(1)*time.Millisecond; got != want { - t.Errorf("expected %v; got: %v", want, got) - } - if got, want := b.Next(), time.Duration(2)*time.Millisecond; got != want { - t.Errorf("expected %v; got: %v", want, got) - } - if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { - t.Errorf("expected %v; got: %v", want, got) - } - if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { - t.Errorf("expected %v; got: %v", want, got) - } - - b.Reset() - - if got, want := b.Next(), time.Duration(1)*time.Millisecond; got != want { - t.Errorf("expected %v; got: %v", want, got) - } - if got, want := b.Next(), time.Duration(2)*time.Millisecond; got != want { - t.Errorf("expected %v; got: %v", want, got) - } - if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { - t.Errorf("expected %v; got: %v", want, got) - } - if got, want := b.Next(), time.Duration(7)*time.Millisecond; got != want { - t.Errorf("expected %v; got: %v", want, got) - } -} - -func TestSimpleBackoffWithStop(t *testing.T) { - b := NewSimpleBackoff(1, 2, 7).SendStop(true) - - // It should eventually return Stop (-1) after some loops. - var last time.Duration - for i := 0; i < 10; i++ { - last = b.Next() - if last == Stop { - break - } - } - if got, want := last, Stop; got != want { - t.Errorf("expected %v; got: %v", want, got) - } - - b.Reset() - - // It should eventually return Stop (-1) after some loops. - for i := 0; i < 10; i++ { - last = b.Next() - if last == Stop { - break - } - } - if got, want := last, Stop; got != want { - t.Errorf("expected %v; got: %v", want, got) - } -} - -func TestExponentialBackoff(t *testing.T) { - rand.Seed(time.Now().UnixNano()) - - min := time.Duration(8) * time.Millisecond - max := time.Duration(256) * time.Millisecond - b := NewExponentialBackoff(min, max) - - between := func(value time.Duration, a, b int) bool { - x := int(value / time.Millisecond) - return a <= x && x <= b - } - - if got := b.Next(); !between(got, 8, 256) { - t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) - } - if got := b.Next(); !between(got, 8, 256) { - t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) - } - if got := b.Next(); !between(got, 8, 256) { - t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) - } - if got := b.Next(); !between(got, 8, 256) { - t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) - } - - b.Reset() - - if got := b.Next(); !between(got, 8, 256) { - t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) - } - if got := b.Next(); !between(got, 8, 256) { - t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) - } - if got := b.Next(); !between(got, 8, 256) { - t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) - } - if got := b.Next(); !between(got, 8, 256) { - t.Errorf("expected [%v..%v]; got: %v", 8, 256, got) - } -} - -func TestExponentialBackoffWithStop(t *testing.T) { - rand.Seed(time.Now().UnixNano()) - - min := time.Duration(8) * time.Millisecond - max := time.Duration(256) * time.Millisecond - b := NewExponentialBackoff(min, max).SendStop(true) - - // It should eventually return Stop (-1) after some loops. 
-	var last time.Duration
-	for i := 0; i < 10; i++ {
-		last = b.Next()
-		if last == Stop {
-			break
-		}
-	}
-	if got, want := last, Stop; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-
-	b.Reset()
-
-	// It should eventually return Stop (-1) after some loops.
-	for i := 0; i < 10; i++ {
-		last = b.Next()
-		if last == Stop {
-			break
-		}
-	}
-	if got, want := last, Stop; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry.go b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry.go
deleted file mode 100644
index 701e03ccc..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-// This file is (c) 2014 Cenk Altı and governed by the MIT license.
-// See https://github.com/cenkalti/backoff for original source.
-
-package backoff
-
-import "time"
-
-// An Operation is executed by Retry() or RetryNotify().
-// The operation will be retried using a backoff policy if it returns an error.
-type Operation func() error
-
-// Notify is a notify-on-error function. It receives an operation error and
-// backoff delay if the operation failed (with an error).
-//
-// NOTE that if the backoff policy says to stop retrying,
-// the notify function isn't called.
-type Notify func(error, time.Duration)
-
-// Retry the function f until it does not return an error or the backoff
-// policy stops. f is guaranteed to be run at least once.
-// It is the caller's responsibility to reset b after Retry returns.
-//
-// Retry sleeps the goroutine for the duration returned by BackOff after a
-// failed operation returns.
-func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }
-
-// RetryNotify calls the notify function with the error and wait duration
-// for each failed attempt before sleeping.
-func RetryNotify(operation Operation, b Backoff, notify Notify) error {
-	var err error
-	var next time.Duration
-
-	b.Reset()
-	for {
-		if err = operation(); err == nil {
-			return nil
-		}
-
-		if next = b.Next(); next == Stop {
-			return err
-		}
-
-		if notify != nil {
-			notify(err, next)
-		}
-
-		time.Sleep(next)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry_test.go
deleted file mode 100644
index 0dd45404b..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/backoff/retry_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-// This file is (c) 2014 Cenk Altı and governed by the MIT license.
-// See https://github.com/cenkalti/backoff for original source.
-
-package backoff
-
-import (
-	"errors"
-	"log"
-	"testing"
-	"time"
-)
-
-func TestRetry(t *testing.T) {
-	const successOn = 3
-	var i = 0
-
-	// This function is successful on the "successOn"th call.
-	f := func() error {
-		i++
-		log.Printf("function is called %d. time\n", i)
-
-		if i == successOn {
-			log.Println("OK")
-			return nil
-		}
-
-		log.Println("error")
-		return errors.New("error")
-	}
-
-	min := time.Duration(8) * time.Millisecond
-	max := time.Duration(256) * time.Millisecond
-	err := Retry(f, NewExponentialBackoff(min, max).SendStop(true))
-	if err != nil {
-		t.Errorf("unexpected error: %s", err.Error())
-	}
-	if i != successOn {
-		t.Errorf("invalid number of retries: %d", i)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk.go
deleted file mode 100644
index 91c7a9c17..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/url"
-
-	"gopkg.in/olivere/elastic.v3/uritemplates"
-)
-
-type BulkService struct {
-	client *Client
-
-	index    string
-	_type    string
-	requests []BulkableRequest
-	//replicationType string
-	//consistencyLevel string
-	timeout string
-	refresh *bool
-	pretty  bool
-
-	sizeInBytes int64
-}
-
-func NewBulkService(client *Client) *BulkService {
-	builder := &BulkService{
-		client:   client,
-		requests: make([]BulkableRequest, 0),
-	}
-	return builder
-}
-
-func (s *BulkService) reset() {
-	s.requests = make([]BulkableRequest, 0)
-	s.sizeInBytes = 0
-}
-
-func (s *BulkService) Index(index string) *BulkService {
-	s.index = index
-	return s
-}
-
-func (s *BulkService) Type(_type string) *BulkService {
-	s._type = _type
-	return s
-}
-
-func (s *BulkService) Timeout(timeout string) *BulkService {
-	s.timeout = timeout
-	return s
-}
-
-func (s *BulkService) Refresh(refresh bool) *BulkService {
-	s.refresh = &refresh
-	return s
-}
-
-func (s *BulkService) Pretty(pretty bool) *BulkService {
-	s.pretty = pretty
-	return s
-}
-
-func (s *BulkService) Add(r BulkableRequest) *BulkService {
-	s.requests = append(s.requests, r)
-	s.sizeInBytes += s.estimateSizeInBytes(r)
-	return s
-}
-
-func (s *BulkService) EstimatedSizeInBytes() int64 {
-	return s.sizeInBytes
-}
-
-func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
-	// +1 for the \n
-	return int64(1 + len([]byte(r.String())))
-}
-
-func (s *BulkService) NumberOfActions() int {
-	return len(s.requests)
-}
-
-func (s *BulkService) bodyAsString() (string, error) {
-	buf := bytes.NewBufferString("")
-
-	for _, req := range s.requests {
-		source, err := req.Source()
-		if err != nil {
-			return "", err
-		}
-		for _, line := range source {
-			_, err := buf.WriteString(fmt.Sprintf("%s\n", line))
-			if err != nil {
-				return "", err
-			}
-		}
-	}
-
-	return buf.String(), nil
-}
-
-func (s *BulkService) Do() (*BulkResponse, error) {
-	// No actions?
- if s.NumberOfActions() == 0 { - return nil, errors.New("elastic: No bulk actions to commit") - } - - // Get body - body, err := s.bodyAsString() - if err != nil { - return nil, err - } - - // Build url - path := "/" - if s.index != "" { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": s.index, - }) - if err != nil { - return nil, err - } - path += index + "/" - } - if s._type != "" { - typ, err := uritemplates.Expand("{type}", map[string]string{ - "type": s._type, - }) - if err != nil { - return nil, err - } - path += typ + "/" - } - path += "_bulk" - - // Parameters - params := make(url.Values) - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - - // Get response - res, err := s.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return results - ret := new(BulkResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - - // Reset so the request can be reused - s.reset() - - return ret, nil -} - -// BulkResponse is a response to a bulk execution. -// -// Example: -// { -// "took":3, -// "errors":false, -// "items":[{ -// "index":{ -// "_index":"index1", -// "_type":"tweet", -// "_id":"1", -// "_version":3, -// "status":201 -// } -// },{ -// "index":{ -// "_index":"index2", -// "_type":"tweet", -// "_id":"2", -// "_version":3, -// "status":200 -// } -// },{ -// "delete":{ -// "_index":"index1", -// "_type":"tweet", -// "_id":"1", -// "_version":4, -// "status":200, -// "found":true -// } -// },{ -// "update":{ -// "_index":"index2", -// "_type":"tweet", -// "_id":"2", -// "_version":4, -// "status":200 -// } -// }] -// } -type BulkResponse struct { - Took int `json:"took,omitempty"` - Errors bool `json:"errors,omitempty"` - Items []map[string]*BulkResponseItem `json:"items,omitempty"` -} - -// BulkResponseItem is the result of a single bulk request. -type BulkResponseItem struct { - Index string `json:"_index,omitempty"` - Type string `json:"_type,omitempty"` - Id string `json:"_id,omitempty"` - Version int `json:"_version,omitempty"` - Status int `json:"status,omitempty"` - Found bool `json:"found,omitempty"` - Error *ErrorDetails `json:"error,omitempty"` -} - -// Indexed returns all bulk request results of "index" actions. -func (r *BulkResponse) Indexed() []*BulkResponseItem { - return r.ByAction("index") -} - -// Created returns all bulk request results of "create" actions. -func (r *BulkResponse) Created() []*BulkResponseItem { - return r.ByAction("create") -} - -// Updated returns all bulk request results of "update" actions. -func (r *BulkResponse) Updated() []*BulkResponseItem { - return r.ByAction("update") -} - -// Deleted returns all bulk request results of "delete" actions. -func (r *BulkResponse) Deleted() []*BulkResponseItem { - return r.ByAction("delete") -} - -// ByAction returns all bulk request results of a certain action, -// e.g. "index" or "delete". -func (r *BulkResponse) ByAction(action string) []*BulkResponseItem { - if r.Items == nil { - return nil - } - items := make([]*BulkResponseItem, 0) - for _, item := range r.Items { - if result, found := item[action]; found { - items = append(items, result) - } - } - return items -} - -// ById returns all bulk request results of a given document id, -// regardless of the action ("index", "delete" etc.). 
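-//
-// For example, on the sample response shown above:
-//
-//	r.ById("1") // the "index" and "delete" result items for document "1"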
-func (r *BulkResponse) ById(id string) []*BulkResponseItem { - if r.Items == nil { - return nil - } - items := make([]*BulkResponseItem, 0) - for _, item := range r.Items { - for _, result := range item { - if result.Id == id { - items = append(items, result) - } - } - } - return items -} - -// Failed returns those items of a bulk response that have errors, -// i.e. those that don't have a status code between 200 and 299. -func (r *BulkResponse) Failed() []*BulkResponseItem { - if r.Items == nil { - return nil - } - errors := make([]*BulkResponseItem, 0) - for _, item := range r.Items { - for _, result := range item { - if !(result.Status >= 200 && result.Status <= 299) { - errors = append(errors, result) - } - } - } - return errors -} - -// Succeeded returns those items of a bulk response that have no errors, -// i.e. those have a status code between 200 and 299. -func (r *BulkResponse) Succeeded() []*BulkResponseItem { - if r.Items == nil { - return nil - } - succeeded := make([]*BulkResponseItem, 0) - for _, item := range r.Items { - for _, result := range item { - if result.Status >= 200 && result.Status <= 299 { - succeeded = append(succeeded, result) - } - } - } - return succeeded -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request.go deleted file mode 100644 index 0ea372209..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "strings" -) - -// -- Bulk delete request -- - -// Bulk request to remove document from Elasticsearch. -type BulkDeleteRequest struct { - BulkableRequest - index string - typ string - id string - routing string - refresh *bool - version int64 // default is MATCH_ANY - versionType string // default is "internal" -} - -func NewBulkDeleteRequest() *BulkDeleteRequest { - return &BulkDeleteRequest{} -} - -func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest { - r.index = index - return r -} - -func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest { - r.typ = typ - return r -} - -func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest { - r.id = id - return r -} - -func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest { - r.routing = routing - return r -} - -func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest { - r.refresh = &refresh - return r -} - -func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest { - r.version = version - return r -} - -// VersionType can be "internal" (default), "external", "external_gte", -// "external_gt", or "force". 
-func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest { - r.versionType = versionType - return r -} - -func (r *BulkDeleteRequest) String() string { - lines, err := r.Source() - if err == nil { - return strings.Join(lines, "\n") - } - return fmt.Sprintf("error: %v", err) -} - -func (r *BulkDeleteRequest) Source() ([]string, error) { - lines := make([]string, 1) - - source := make(map[string]interface{}) - deleteCommand := make(map[string]interface{}) - if r.index != "" { - deleteCommand["_index"] = r.index - } - if r.typ != "" { - deleteCommand["_type"] = r.typ - } - if r.id != "" { - deleteCommand["_id"] = r.id - } - if r.routing != "" { - deleteCommand["_routing"] = r.routing - } - if r.version > 0 { - deleteCommand["_version"] = r.version - } - if r.versionType != "" { - deleteCommand["_version_type"] = r.versionType - } - if r.refresh != nil { - deleteCommand["refresh"] = *r.refresh - } - source["delete"] = deleteCommand - - body, err := json.Marshal(source) - if err != nil { - return nil, err - } - - lines[0] = string(body) - - return lines, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request_test.go deleted file mode 100644 index 73abfcd40..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_delete_request_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestBulkDeleteRequestSerialization(t *testing.T) { - tests := []struct { - Request BulkableRequest - Expected []string - }{ - // #0 - { - Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"), - Expected: []string{ - `{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`, - }, - }, - } - - for i, test := range tests { - lines, err := test.Request.Source() - if err != nil { - t.Fatalf("case #%d: expected no error, got: %v", i, err) - } - if lines == nil { - t.Fatalf("case #%d: expected lines, got nil", i) - } - if len(lines) != len(test.Expected) { - t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) - } - for j, line := range lines { - if line != test.Expected[j] { - t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line) - } - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request.go deleted file mode 100644 index 495694671..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "strings" -) - -// Bulk request to add document to Elasticsearch. 
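-//
-// A request serializes to a command line followed by the document itself;
-// e.g. (values as in the serialization tests below):
-//
-//	NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").Doc(doc)
-//
-// produces the two bulk-body lines
-//
-//	{"index":{"_id":"1","_index":"index1","_type":"tweet"}}
-//	{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}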
-type BulkIndexRequest struct { - BulkableRequest - index string - typ string - id string - opType string - routing string - parent string - timestamp string - ttl int64 - refresh *bool - version int64 // default is MATCH_ANY - versionType string // default is "internal" - doc interface{} -} - -func NewBulkIndexRequest() *BulkIndexRequest { - return &BulkIndexRequest{ - opType: "index", - } -} - -func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest { - r.index = index - return r -} - -func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest { - r.typ = typ - return r -} - -func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest { - r.id = id - return r -} - -func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest { - r.opType = opType - return r -} - -func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest { - r.routing = routing - return r -} - -func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest { - r.parent = parent - return r -} - -func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest { - r.timestamp = timestamp - return r -} - -func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest { - r.ttl = ttl - return r -} - -func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest { - r.refresh = &refresh - return r -} - -func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest { - r.version = version - return r -} - -func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest { - r.versionType = versionType - return r -} - -func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest { - r.doc = doc - return r -} - -func (r *BulkIndexRequest) String() string { - lines, err := r.Source() - if err == nil { - return strings.Join(lines, "\n") - } - return fmt.Sprintf("error: %v", err) -} - -func (r *BulkIndexRequest) Source() ([]string, error) { - // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } - // { "field1" : "value1" } - - lines := make([]string, 2) - - // "index" ... - command := make(map[string]interface{}) - indexCommand := make(map[string]interface{}) - if r.index != "" { - indexCommand["_index"] = r.index - } - if r.typ != "" { - indexCommand["_type"] = r.typ - } - if r.id != "" { - indexCommand["_id"] = r.id - } - if r.routing != "" { - indexCommand["_routing"] = r.routing - } - if r.parent != "" { - indexCommand["_parent"] = r.parent - } - if r.timestamp != "" { - indexCommand["_timestamp"] = r.timestamp - } - if r.ttl > 0 { - indexCommand["_ttl"] = r.ttl - } - if r.version > 0 { - indexCommand["_version"] = r.version - } - if r.versionType != "" { - indexCommand["_version_type"] = r.versionType - } - if r.refresh != nil { - indexCommand["refresh"] = *r.refresh - } - command[r.opType] = indexCommand - line, err := json.Marshal(command) - if err != nil { - return nil, err - } - lines[0] = string(line) - - // "field1" ... 
- if r.doc != nil { - switch t := r.doc.(type) { - default: - body, err := json.Marshal(r.doc) - if err != nil { - return nil, err - } - lines[1] = string(body) - case json.RawMessage: - lines[1] = string(t) - case *json.RawMessage: - lines[1] = string(*t) - case string: - lines[1] = t - case *string: - lines[1] = *t - } - } else { - lines[1] = "{}" - } - - return lines, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request_test.go deleted file mode 100644 index 271347e30..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_index_request_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" - "time" -) - -func TestBulkIndexRequestSerialization(t *testing.T) { - tests := []struct { - Request BulkableRequest - Expected []string - }{ - // #0 - { - Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1"). - Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), - Expected: []string{ - `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`, - `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, - }, - }, - // #1 - { - Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1"). - Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), - Expected: []string{ - `{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`, - `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, - }, - }, - // #2 - { - Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1"). - Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}), - Expected: []string{ - `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`, - `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`, - }, - }, - } - - for i, test := range tests { - lines, err := test.Request.Source() - if err != nil { - t.Fatalf("case #%d: expected no error, got: %v", i, err) - } - if lines == nil { - t.Fatalf("case #%d: expected lines, got nil", i) - } - if len(lines) != len(test.Expected) { - t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) - } - for j, line := range lines { - if line != test.Expected[j] { - t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line) - } - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor.go deleted file mode 100644 index 04492a47c..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "sync" - "sync/atomic" - "time" - - "gopkg.in/olivere/elastic.v3/backoff" -) - -// BulkProcessorService allows to easily process bulk requests. It allows setting -// policies when to flush new bulk requests, e.g. 
based on a number of actions,
-// on the size of the actions, and/or to flush periodically. It also allows
-// you to control the number of concurrent bulk requests allowed to be
-// executed in parallel.
-//
-// BulkProcessorService, by default, commits either every 1000 requests or when the
-// (estimated) size of the bulk requests exceeds 5 MB. However, it does not
-// commit periodically. BulkProcessorService also retries by default, using
-// an exponential backoff algorithm.
-//
-// The caller is responsible for setting the index and type on every
-// bulk request added to BulkProcessorService.
-//
-// BulkProcessorService takes ideas from the BulkProcessor of the
-// Elasticsearch Java API as documented in
-// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
-type BulkProcessorService struct {
-	c              *Client
-	beforeFn       BulkBeforeFunc
-	afterFn        BulkAfterFunc
-	name           string        // name of processor
-	numWorkers     int           // # of workers (>= 1)
-	bulkActions    int           // # of requests after which to commit
-	bulkSize       int           // # of bytes after which to commit
-	flushInterval  time.Duration // periodic flush interval
-	wantStats      bool          // indicates whether to gather statistics
-	initialTimeout time.Duration // initial wait time before retry on errors
-	maxTimeout     time.Duration // max time to wait for retry on errors
-}
-
-// NewBulkProcessorService creates a new BulkProcessorService.
-func NewBulkProcessorService(client *Client) *BulkProcessorService {
-	return &BulkProcessorService{
-		c:              client,
-		numWorkers:     1,
-		bulkActions:    1000,
-		bulkSize:       5 << 20, // 5 MB
-		initialTimeout: time.Duration(200) * time.Millisecond,
-		maxTimeout:     time.Duration(10000) * time.Millisecond,
-	}
-}
-
-// BulkBeforeFunc defines the signature of callbacks that are executed
-// before a commit to Elasticsearch.
-type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
-
-// BulkAfterFunc defines the signature of callbacks that are executed
-// after a commit to Elasticsearch. The err parameter signals an error.
-type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
-
-// Before specifies a function to be executed before bulk requests get committed
-// to Elasticsearch.
-func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
-	s.beforeFn = fn
-	return s
-}
-
-// After specifies a function to be executed when bulk requests have been
-// committed to Elasticsearch. The After callback executes both when the
-// commit was successful as well as on failures.
-func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
-	s.afterFn = fn
-	return s
-}
-
-// Name is an optional name to identify this bulk processor.
-func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
-	s.name = name
-	return s
-}
-
-// Workers sets the number of concurrent workers allowed to execute.
-// Defaults to 1 and must be greater than or equal to 1.
-func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
-	s.numWorkers = num
-	return s
-}
-
-// BulkActions specifies when to flush based on the number of actions
-// currently added. Defaults to 1000 and can be set to -1 to be disabled.
-func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
-	s.bulkActions = bulkActions
-	return s
-}
-
-// BulkSize specifies when to flush based on the size (in bytes) of the actions
-// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
-func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService { - s.bulkSize = bulkSize - return s -} - -// FlushInterval specifies when to flush at the end of the given interval. -// This is disabled by default. If you want the bulk processor to -// operate completely asynchronously, set both BulkActions and BulkSize to -// -1 and set the FlushInterval to a meaningful interval. -func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService { - s.flushInterval = interval - return s -} - -// Stats tells bulk processor to gather stats while running. -// Use Stats to return the stats. This is disabled by default. -func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService { - s.wantStats = wantStats - return s -} - -// Do creates a new BulkProcessor and starts it. -// Consider the BulkProcessor as a running instance that accepts bulk requests -// and commits them to Elasticsearch, spreading the work across one or more -// workers. -// -// You can interoperate with the BulkProcessor returned by Do, e.g. Start and -// Stop (or Close) it. -// -// Calling Do several times returns new BulkProcessors. You probably don't -// want to do this. BulkProcessorService implements just a builder pattern. -func (s *BulkProcessorService) Do() (*BulkProcessor, error) { - p := newBulkProcessor( - s.c, - s.beforeFn, - s.afterFn, - s.name, - s.numWorkers, - s.bulkActions, - s.bulkSize, - s.flushInterval, - s.wantStats, - s.initialTimeout, - s.maxTimeout) - - err := p.Start() - if err != nil { - return nil, err - } - return p, nil -} - -// -- Bulk Processor Statistics -- - -// BulkProcessorStats contains various statistics of a bulk processor -// while it is running. Use the Stats func to return it while running. -type BulkProcessorStats struct { - Flushed int64 // number of times the flush interval has been invoked - Committed int64 // # of times workers committed bulk requests - Indexed int64 // # of requests indexed - Created int64 // # of requests that ES reported as creates (201) - Updated int64 // # of requests that ES reported as updates - Deleted int64 // # of requests that ES reported as deletes - Succeeded int64 // # of requests that ES reported as successful - Failed int64 // # of requests that ES reported as failed - - Workers []*BulkProcessorWorkerStats // stats for each worker -} - -// BulkProcessorWorkerStats represents per-worker statistics. -type BulkProcessorWorkerStats struct { - Queued int64 // # of requests queued in this worker - LastDuration time.Duration // duration of last commit -} - -// newBulkProcessorStats initializes and returns a BulkProcessorStats struct. -func newBulkProcessorStats(workers int) *BulkProcessorStats { - stats := &BulkProcessorStats{ - Workers: make([]*BulkProcessorWorkerStats, workers), - } - for i := 0; i < workers; i++ { - stats.Workers[i] = &BulkProcessorWorkerStats{} - } - return stats -} - -// -- Bulk Processor -- - -// BulkProcessor encapsulates a task that accepts bulk requests and -// orchestrates committing them to Elasticsearch via one or more workers. -// -// BulkProcessor is returned by setting up a BulkProcessorService and -// calling the Do method. 
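-//
-// A typical setup, sketched after the package tests (assumes a connected
-// *Client; index and type names are placeholders, error handling elided):
-//
-//	p, err := client.BulkProcessor().
-//		Name("Worker-1").
-//		Workers(2).
-//		BulkActions(100). // commit every 100 requests
-//		Do()
-//	// ... check err ...
-//	p.Add(NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").Doc(doc))
-//	err = p.Close()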
-type BulkProcessor struct { - c *Client - beforeFn BulkBeforeFunc - afterFn BulkAfterFunc - name string - bulkActions int - bulkSize int - numWorkers int - executionId int64 - requestsC chan BulkableRequest - workerWg sync.WaitGroup - workers []*bulkWorker - flushInterval time.Duration - flusherStopC chan struct{} - wantStats bool - initialTimeout time.Duration // initial wait time before retry on errors - maxTimeout time.Duration // max time to wait for retry on errors - - startedMu sync.Mutex // guards the following block - started bool - - statsMu sync.Mutex // guards the following block - stats *BulkProcessorStats -} - -func newBulkProcessor( - client *Client, - beforeFn BulkBeforeFunc, - afterFn BulkAfterFunc, - name string, - numWorkers int, - bulkActions int, - bulkSize int, - flushInterval time.Duration, - wantStats bool, - initialTimeout time.Duration, - maxTimeout time.Duration) *BulkProcessor { - return &BulkProcessor{ - c: client, - beforeFn: beforeFn, - afterFn: afterFn, - name: name, - numWorkers: numWorkers, - bulkActions: bulkActions, - bulkSize: bulkSize, - flushInterval: flushInterval, - wantStats: wantStats, - initialTimeout: initialTimeout, - maxTimeout: maxTimeout, - } -} - -// Start starts the bulk processor. If the processor is already started, -// nil is returned. -func (p *BulkProcessor) Start() error { - p.startedMu.Lock() - defer p.startedMu.Unlock() - - if p.started { - return nil - } - - // We must have at least one worker. - if p.numWorkers < 1 { - p.numWorkers = 1 - } - - p.requestsC = make(chan BulkableRequest) - p.executionId = 0 - p.stats = newBulkProcessorStats(p.numWorkers) - - // Create and start up workers. - p.workers = make([]*bulkWorker, p.numWorkers) - for i := 0; i < p.numWorkers; i++ { - p.workerWg.Add(1) - p.workers[i] = newBulkWorker(p, i) - go p.workers[i].work() - } - - // Start the ticker for flush (if enabled) - if int64(p.flushInterval) > 0 { - p.flusherStopC = make(chan struct{}) - go p.flusher(p.flushInterval) - } - - p.started = true - - return nil -} - -// Stop is an alias for Close. -func (p *BulkProcessor) Stop() error { - return p.Close() -} - -// Close stops the bulk processor previously started with Do. -// If it is already stopped, this is a no-op and nil is returned. -// -// By implementing Close, BulkProcessor implements the io.Closer interface. -func (p *BulkProcessor) Close() error { - p.startedMu.Lock() - defer p.startedMu.Unlock() - - // Already stopped? Do nothing. - if !p.started { - return nil - } - - // Stop flusher (if enabled) - if p.flusherStopC != nil { - p.flusherStopC <- struct{}{} - <-p.flusherStopC - close(p.flusherStopC) - p.flusherStopC = nil - } - - // Stop all workers. - close(p.requestsC) - p.workerWg.Wait() - - p.started = false - - return nil -} - -// Stats returns the latest bulk processor statistics. -// Collecting stats must be enabled first by calling Stats(true) on -// the service that created this processor. -func (p *BulkProcessor) Stats() BulkProcessorStats { - p.statsMu.Lock() - defer p.statsMu.Unlock() - return *p.stats -} - -// Add adds a single request to commit by the BulkProcessorService. -// -// The caller is responsible for setting the index and type on the request. -func (p *BulkProcessor) Add(request BulkableRequest) { - p.requestsC <- request -} - -// Flush manually asks all workers to commit their outstanding requests. -// It returns only when all workers acknowledge completion. 
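-//
-// For example, to push out everything still queued and then read the
-// statistics (Stats(true) must have been set on the service for the
-// counters to be populated):
-//
-//	if err := p.Flush(); err != nil {
-//		// handle error
-//	}
-//	stats := p.Stats()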
-func (p *BulkProcessor) Flush() error { - p.statsMu.Lock() - p.stats.Flushed++ - p.statsMu.Unlock() - - for _, w := range p.workers { - w.flushC <- struct{}{} - <-w.flushAckC // wait for completion - } - return nil -} - -// flusher is a single goroutine that periodically asks all workers to -// commit their outstanding bulk requests. It is only started if -// FlushInterval is greater than 0. -func (p *BulkProcessor) flusher(interval time.Duration) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: // Periodic flush - p.Flush() // TODO swallow errors here? - - case <-p.flusherStopC: - p.flusherStopC <- struct{}{} - return - } - } -} - -// -- Bulk Worker -- - -// bulkWorker encapsulates a single worker, running in a goroutine, -// receiving bulk requests and eventually committing them to Elasticsearch. -// It is strongly bound to a BulkProcessor. -type bulkWorker struct { - p *BulkProcessor - i int - bulkActions int - bulkSize int - service *BulkService - flushC chan struct{} - flushAckC chan struct{} -} - -// newBulkWorker creates a new bulkWorker instance. -func newBulkWorker(p *BulkProcessor, i int) *bulkWorker { - return &bulkWorker{ - p: p, - i: i, - bulkActions: p.bulkActions, - bulkSize: p.bulkSize, - service: NewBulkService(p.c), - flushC: make(chan struct{}), - flushAckC: make(chan struct{}), - } -} - -// work waits for bulk requests and manual flush calls on the respective -// channels and is invoked as a goroutine when the bulk processor is started. -func (w *bulkWorker) work() { - defer func() { - w.p.workerWg.Done() - close(w.flushAckC) - close(w.flushC) - }() - - var stop bool - for !stop { - select { - case req, open := <-w.p.requestsC: - if open { - // Received a new request - w.service.Add(req) - if w.commitRequired() { - w.commit() // TODO swallow errors here? - } - } else { - // Channel closed: Stop. - stop = true - if w.service.NumberOfActions() > 0 { - w.commit() // TODO swallow errors here? - } - } - - case <-w.flushC: - // Commit outstanding requests - if w.service.NumberOfActions() > 0 { - w.commit() // TODO swallow errors here? - } - w.flushAckC <- struct{}{} - } - } -} - -// commit commits the bulk requests in the given service, -// invoking callbacks as specified. 
-func (w *bulkWorker) commit() error { - var res *BulkResponse - - // commitFunc will commit bulk requests and, on failure, be retried - // via exponential backoff - commitFunc := func() error { - var err error - res, err = w.service.Do() - return err - } - // notifyFunc will be called if retry fails - notifyFunc := func(err error, d time.Duration) { - w.p.c.errorf("elastic: bulk processor %q failed but will retry in %v: %v", w.p.name, d, err) - } - - id := atomic.AddInt64(&w.p.executionId, 1) - - // Update # documents in queue before eventual retries - w.p.statsMu.Lock() - if w.p.wantStats { - w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) - } - w.p.statsMu.Unlock() - - // Invoke before callback - if w.p.beforeFn != nil { - w.p.beforeFn(id, w.service.requests) - } - - // Commit bulk requests - policy := backoff.NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout).SendStop(true) - err := backoff.RetryNotify(commitFunc, policy, notifyFunc) - w.updateStats(res) - if err != nil { - w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err) - } - - // Invoke after callback - if w.p.afterFn != nil { - w.p.afterFn(id, w.service.requests, res, err) - } - - return err -} - -func (w *bulkWorker) updateStats(res *BulkResponse) { - // Update stats - if res != nil { - w.p.statsMu.Lock() - if w.p.wantStats { - w.p.stats.Committed++ - if res != nil { - w.p.stats.Indexed += int64(len(res.Indexed())) - w.p.stats.Created += int64(len(res.Created())) - w.p.stats.Updated += int64(len(res.Updated())) - w.p.stats.Deleted += int64(len(res.Deleted())) - w.p.stats.Succeeded += int64(len(res.Succeeded())) - w.p.stats.Failed += int64(len(res.Failed())) - } - w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) - w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond - } - w.p.statsMu.Unlock() - } -} - -// commitRequired returns true if the service has to commit its -// bulk requests. This can be either because the number of actions -// or the estimated size in bytes is larger than specified in the -// BulkProcessorService. -func (w *bulkWorker) commitRequired() bool { - if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions { - return true - } - if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) { - return true - } - return false -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor_test.go deleted file mode 100644 index 645617b4d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_processor_test.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "fmt" - "math/rand" - "sync/atomic" - "testing" - "time" -) - -func TestBulkProcessorDefaults(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - p := client.BulkProcessor() - if p == nil { - t.Fatalf("expected BulkProcessorService; got: %v", p) - } - if got, want := p.name, ""; got != want { - t.Errorf("expected %q; got: %q", want, got) - } - if got, want := p.numWorkers, 1; got != want { - t.Errorf("expected %d; got: %d", want, got) - } - if got, want := p.bulkActions, 1000; got != want { - t.Errorf("expected %d; got: %d", want, got) - } - if got, want := p.bulkSize, 5*1024*1024; got != want { - t.Errorf("expected %d; got: %d", want, got) - } - if got, want := p.flushInterval, time.Duration(0); got != want { - t.Errorf("expected %v; got: %v", want, got) - } - if got, want := p.wantStats, false; got != want { - t.Errorf("expected %v; got: %v", want, got) - } -} - -func TestBulkProcessorCommitOnBulkActions(t *testing.T) { - //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) - client := setupTestClientAndCreateIndex(t) - - testBulkProcessor(t, - 10000, - client.BulkProcessor(). - Name("Actions-1"). - Workers(1). - BulkActions(100). - BulkSize(-1), - ) - - testBulkProcessor(t, - 10000, - client.BulkProcessor(). - Name("Actions-2"). - Workers(2). - BulkActions(100). - BulkSize(-1), - ) -} - -func TestBulkProcessorCommitOnBulkSize(t *testing.T) { - //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) - client := setupTestClientAndCreateIndex(t) - - testBulkProcessor(t, - 10000, - client.BulkProcessor(). - Name("Size-1"). - Workers(1). - BulkActions(-1). - BulkSize(64*1024), - ) - - testBulkProcessor(t, - 10000, - client.BulkProcessor(). - Name("Size-2"). - Workers(2). - BulkActions(-1). - BulkSize(64*1024), - ) -} - -func TestBulkProcessorBasedOnFlushInterval(t *testing.T) { - //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) - client := setupTestClientAndCreateIndex(t) - - var beforeRequests int64 - var befores int64 - var afters int64 - var failures int64 - - beforeFn := func(executionId int64, requests []BulkableRequest) { - atomic.AddInt64(&beforeRequests, int64(len(requests))) - atomic.AddInt64(&befores, 1) - } - afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { - atomic.AddInt64(&afters, 1) - if err != nil { - atomic.AddInt64(&failures, 1) - } - } - - svc := client.BulkProcessor(). - Name("FlushInterval-1"). - Workers(2). - BulkActions(-1). - BulkSize(-1). - FlushInterval(1 * time.Second). - Before(beforeFn). - After(afterFn) - - p, err := svc.Do() - if err != nil { - t.Fatal(err) - } - - const numDocs = 1000 // low-enough number that flush should be invoked - - for i := 1; i <= numDocs; i++ { - tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. 
%s", i, randomString(rand.Intn(64)))} - request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) - p.Add(request) - } - - // Should flush at least once - time.Sleep(2 * time.Second) - - err = p.Close() - if err != nil { - t.Fatal(err) - } - - if p.stats.Flushed == 0 { - t.Errorf("expected at least 1 flush; got: %d", p.stats.Flushed) - } - if got, want := beforeRequests, int64(numDocs); got != want { - t.Errorf("expected %d requests to before callback; got: %d", want, got) - } - if befores == 0 { - t.Error("expected at least 1 call to before callback") - } - if afters == 0 { - t.Error("expected at least 1 call to after callback") - } - if failures != 0 { - t.Errorf("expected 0 calls to failure callback; got: %d", failures) - } - - // Check number of documents that were bulk indexed - _, err = p.c.Flush(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - count, err := p.c.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if count != int64(numDocs) { - t.Fatalf("expected %d documents; got: %d", numDocs, count) - } -} - -func TestBulkProcessorClose(t *testing.T) { - //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) - client := setupTestClientAndCreateIndex(t) - - var beforeRequests int64 - var befores int64 - var afters int64 - var failures int64 - - beforeFn := func(executionId int64, requests []BulkableRequest) { - atomic.AddInt64(&beforeRequests, int64(len(requests))) - atomic.AddInt64(&befores, 1) - } - afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { - atomic.AddInt64(&afters, 1) - if err != nil { - atomic.AddInt64(&failures, 1) - } - } - - p, err := client.BulkProcessor(). - Name("FlushInterval-1"). - Workers(2). - BulkActions(-1). - BulkSize(-1). - FlushInterval(30 * time.Second). // 30 seconds to flush - Before(beforeFn).After(afterFn). - Do() - if err != nil { - t.Fatal(err) - } - - const numDocs = 1000 // low-enough number that flush should be invoked - - for i := 1; i <= numDocs; i++ { - tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))} - request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) - p.Add(request) - } - - // Should not flush because 30s > 1s - time.Sleep(1 * time.Second) - - // Close should flush - err = p.Close() - if err != nil { - t.Fatal(err) - } - - if p.stats.Flushed != 0 { - t.Errorf("expected no flush; got: %d", p.stats.Flushed) - } - if got, want := beforeRequests, int64(numDocs); got != want { - t.Errorf("expected %d requests to before callback; got: %d", want, got) - } - if befores == 0 { - t.Error("expected at least 1 call to before callback") - } - if afters == 0 { - t.Error("expected at least 1 call to after callback") - } - if failures != 0 { - t.Errorf("expected 0 calls to failure callback; got: %d", failures) - } - - // Check number of documents that were bulk indexed - _, err = p.c.Flush(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - count, err := p.c.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if count != int64(numDocs) { - t.Fatalf("expected %d documents; got: %d", numDocs, count) - } -} - -func TestBulkProcessorFlush(t *testing.T) { - //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0))) - client := setupTestClientAndCreateIndex(t) - - p, err := client.BulkProcessor(). - Name("ManualFlush"). - Workers(10). - BulkActions(-1). 
- BulkSize(-1). - FlushInterval(30 * time.Second). // 30 seconds to flush - Stats(true). - Do() - if err != nil { - t.Fatal(err) - } - - const numDocs = 100 - - for i := 1; i <= numDocs; i++ { - tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))} - request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) - p.Add(request) - } - - // Should not flush because 30s > 1s - time.Sleep(1 * time.Second) - - // No flush yet - stats := p.Stats() - if stats.Flushed != 0 { - t.Errorf("expected no flush; got: %d", p.stats.Flushed) - } - - // Manual flush - err = p.Flush() - if err != nil { - t.Fatal(err) - } - - time.Sleep(1 * time.Second) - - // Now flushed - stats = p.Stats() - if got, want := p.stats.Flushed, int64(1); got != want { - t.Errorf("expected %d flush; got: %d", want, got) - } - - // Close should not start another flush - err = p.Close() - if err != nil { - t.Fatal(err) - } - - // Still 1 flush - stats = p.Stats() - if got, want := p.stats.Flushed, int64(1); got != want { - t.Errorf("expected %d flush; got: %d", want, got) - } - - // Check number of documents that were bulk indexed - _, err = p.c.Flush(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - count, err := p.c.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if count != int64(numDocs) { - t.Fatalf("expected %d documents; got: %d", numDocs, count) - } -} - -// -- Helper -- - -func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) { - var beforeRequests int64 - var befores int64 - var afters int64 - var failures int64 - - beforeFn := func(executionId int64, requests []BulkableRequest) { - atomic.AddInt64(&beforeRequests, int64(len(requests))) - atomic.AddInt64(&befores, 1) - } - afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) { - atomic.AddInt64(&afters, 1) - if err != nil { - atomic.AddInt64(&failures, 1) - } - } - - p, err := svc.Before(beforeFn).After(afterFn).Stats(true).Do() - if err != nil { - t.Fatal(err) - } - - for i := 1; i <= numDocs; i++ { - tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. 
%s", i, randomString(1+rand.Intn(63)))} - request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet) - p.Add(request) - } - - err = p.Close() - if err != nil { - t.Fatal(err) - } - - stats := p.Stats() - - if stats.Flushed != 0 { - t.Errorf("expected no flush; got: %d", stats.Flushed) - } - if stats.Committed <= 0 { - t.Errorf("expected committed > %d; got: %d", 0, stats.Committed) - } - if got, want := stats.Indexed, int64(numDocs); got != want { - t.Errorf("expected indexed = %d; got: %d", want, got) - } - if got, want := stats.Created, int64(0); got != want { - t.Errorf("expected created = %d; got: %d", want, got) - } - if got, want := stats.Updated, int64(0); got != want { - t.Errorf("expected updated = %d; got: %d", want, got) - } - if got, want := stats.Deleted, int64(0); got != want { - t.Errorf("expected deleted = %d; got: %d", want, got) - } - if got, want := stats.Succeeded, int64(numDocs); got != want { - t.Errorf("expected succeeded = %d; got: %d", want, got) - } - if got, want := stats.Failed, int64(0); got != want { - t.Errorf("expected failed = %d; got: %d", want, got) - } - if got, want := beforeRequests, int64(numDocs); got != want { - t.Errorf("expected %d requests to before callback; got: %d", want, got) - } - if befores == 0 { - t.Error("expected at least 1 call to before callback") - } - if afters == 0 { - t.Error("expected at least 1 call to after callback") - } - if failures != 0 { - t.Errorf("expected 0 calls to failure callback; got: %d", failures) - } - - // Check number of documents that were bulk indexed - _, err = p.c.Flush(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - count, err := p.c.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if count != int64(numDocs) { - t.Fatalf("expected %d documents; got: %d", numDocs, count) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_request.go deleted file mode 100644 index 315b535ca..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_request.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" -) - -// -- Bulkable request (index/update/delete) -- - -// Generic interface to bulkable requests. -type BulkableRequest interface { - fmt.Stringer - Source() ([]string, error) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_test.go deleted file mode 100644 index 7ce9053c8..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_test.go +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestBulk(t *testing.T) { - //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. 
Yeah."} - - index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) - index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) - delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") - - bulkRequest := client.Bulk() - bulkRequest = bulkRequest.Add(index1Req) - bulkRequest = bulkRequest.Add(index2Req) - bulkRequest = bulkRequest.Add(delete1Req) - - if bulkRequest.NumberOfActions() != 3 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions()) - } - - bulkResponse, err := bulkRequest.Do() - if err != nil { - t.Fatal(err) - } - if bulkResponse == nil { - t.Errorf("expected bulkResponse to be != nil; got nil") - } - - if bulkRequest.NumberOfActions() != 0 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) - } - - // Document with Id="1" should not exist - exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() - if err != nil { - t.Fatal(err) - } - if exists { - t.Errorf("expected exists %v; got %v", false, exists) - } - - // Document with Id="2" should exist - exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do() - if err != nil { - t.Fatal(err) - } - if !exists { - t.Errorf("expected exists %v; got %v", true, exists) - } - - // Update - updateDoc := struct { - Retweets int `json:"retweets"` - }{ - 42, - } - update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc) - bulkRequest = client.Bulk() - bulkRequest = bulkRequest.Add(update1Req) - - if bulkRequest.NumberOfActions() != 1 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions()) - } - - bulkResponse, err = bulkRequest.Do() - if err != nil { - t.Fatal(err) - } - if bulkResponse == nil { - t.Errorf("expected bulkResponse to be != nil; got nil") - } - - if bulkRequest.NumberOfActions() != 0 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) - } - - // Document with Id="1" should have a retweets count of 42 - doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do() - if err != nil { - t.Fatal(err) - } - if doc == nil { - t.Fatal("expected doc to be != nil; got nil") - } - if !doc.Found { - t.Fatalf("expected doc to be found; got found = %v", doc.Found) - } - if doc.Source == nil { - t.Fatal("expected doc source to be != nil; got nil") - } - var updatedTweet tweet - err = json.Unmarshal(*doc.Source, &updatedTweet) - if err != nil { - t.Fatal(err) - } - if updatedTweet.Retweets != 42 { - t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets) - } - - // Update with script - update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). - RetryOnConflict(3). 
- Script(NewScript("ctx._source.retweets += v").Param("v", 1)) - bulkRequest = client.Bulk() - bulkRequest = bulkRequest.Add(update2Req) - if bulkRequest.NumberOfActions() != 1 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions()) - } - bulkResponse, err = bulkRequest.Do() - if err != nil { - t.Fatal(err) - } - if bulkResponse == nil { - t.Errorf("expected bulkResponse to be != nil; got nil") - } - - if bulkRequest.NumberOfActions() != 0 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions()) - } - - // Document with Id="1" should have a retweets count of 43 - doc, err = client.Get().Index(testIndexName).Type("tweet").Id("2").Do() - if err != nil { - t.Fatal(err) - } - if doc == nil { - t.Fatal("expected doc to be != nil; got nil") - } - if !doc.Found { - t.Fatalf("expected doc to be found; got found = %v", doc.Found) - } - if doc.Source == nil { - t.Fatal("expected doc source to be != nil; got nil") - } - err = json.Unmarshal(*doc.Source, &updatedTweet) - if err != nil { - t.Fatal(err) - } - if updatedTweet.Retweets != 43 { - t.Errorf("expected updated tweet retweets = %v; got %v", 43, updatedTweet.Retweets) - } -} - -func TestBulkWithIndexSetOnClient(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} - - index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) - index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) - delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") - - bulkRequest := client.Bulk().Index(testIndexName).Type("tweet") - bulkRequest = bulkRequest.Add(index1Req) - bulkRequest = bulkRequest.Add(index2Req) - bulkRequest = bulkRequest.Add(delete1Req) - - if bulkRequest.NumberOfActions() != 3 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions()) - } - - bulkResponse, err := bulkRequest.Do() - if err != nil { - t.Fatal(err) - } - if bulkResponse == nil { - t.Errorf("expected bulkResponse to be != nil; got nil") - } - - // Document with Id="1" should not exist - exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() - if err != nil { - t.Fatal(err) - } - if exists { - t.Errorf("expected exists %v; got %v", false, exists) - } - - // Document with Id="2" should exist - exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do() - if err != nil { - t.Fatal(err) - } - if !exists { - t.Errorf("expected exists %v; got %v", true, exists) - } -} - -func TestBulkRequestsSerialization(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} - - index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) - index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) - delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") - update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). 
- Doc(struct { - Retweets int `json:"retweets"` - }{ - Retweets: 42, - }) - - bulkRequest := client.Bulk() - bulkRequest = bulkRequest.Add(index1Req) - bulkRequest = bulkRequest.Add(index2Req) - bulkRequest = bulkRequest.Add(delete1Req) - bulkRequest = bulkRequest.Add(update2Req) - - if bulkRequest.NumberOfActions() != 4 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions()) - } - - expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}} -{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"} -{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}} -{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"} -{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}} -{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}} -{"doc":{"retweets":42}} -` - got, err := bulkRequest.bodyAsString() - if err != nil { - t.Fatalf("expected no error, got: %v", err) - } - if got != expected { - t.Errorf("expected\n%s\ngot:\n%s", expected, got) - } - - // Run the bulk request - bulkResponse, err := bulkRequest.Do() - if err != nil { - t.Fatal(err) - } - if bulkResponse == nil { - t.Errorf("expected bulkResponse to be != nil; got nil") - } - if bulkResponse.Took == 0 { - t.Errorf("expected took to be > 0; got %d", bulkResponse.Took) - } - if bulkResponse.Errors { - t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors) - } - if len(bulkResponse.Items) != 4 { - t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items)) - } - - // Indexed actions - indexed := bulkResponse.Indexed() - if indexed == nil { - t.Fatal("expected indexed to be != nil; got nil") - } - if len(indexed) != 1 { - t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed)) - } - if indexed[0].Id != "1" { - t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id) - } - if indexed[0].Status != 201 { - t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status) - } - - // Created actions - created := bulkResponse.Created() - if created == nil { - t.Fatal("expected created to be != nil; got nil") - } - if len(created) != 1 { - t.Fatalf("expected len(created) == %d; got %d", 1, len(created)) - } - if created[0].Id != "2" { - t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id) - } - if created[0].Status != 201 { - t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status) - } - - // Deleted actions - deleted := bulkResponse.Deleted() - if deleted == nil { - t.Fatal("expected deleted to be != nil; got nil") - } - if len(deleted) != 1 { - t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted)) - } - if deleted[0].Id != "1" { - t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id) - } - if deleted[0].Status != 200 { - t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status) - } - if !deleted[0].Found { - t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found) - } - - // Updated actions - updated := bulkResponse.Updated() - if updated == nil { - t.Fatal("expected updated to be != nil; got nil") - } - if len(updated) != 1 { - t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated)) - } - if updated[0].Id != "2" { - t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id) - } - if updated[0].Status != 200 { - t.Errorf("expected updated[0].Status == %d; 
got %d", 200, updated[0].Status) - } - if updated[0].Version != 2 { - t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version) - } - - // Succeeded actions - succeeded := bulkResponse.Succeeded() - if succeeded == nil { - t.Fatal("expected succeeded to be != nil; got nil") - } - if len(succeeded) != 4 { - t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded)) - } - - // ById - id1Results := bulkResponse.ById("1") - if id1Results == nil { - t.Fatal("expected id1Results to be != nil; got nil") - } - if len(id1Results) != 2 { - t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results)) - } - if id1Results[0].Id != "1" { - t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id) - } - if id1Results[0].Status != 201 { - t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status) - } - if id1Results[0].Version != 1 { - t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version) - } - if id1Results[1].Id != "1" { - t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id) - } - if id1Results[1].Status != 200 { - t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status) - } - if id1Results[1].Version != 2 { - t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version) - } -} - -func TestFailedBulkRequests(t *testing.T) { - js := `{ - "took" : 2, - "errors" : true, - "items" : [ { - "index" : { - "_index" : "elastic-test", - "_type" : "tweet", - "_id" : "1", - "_version" : 1, - "status" : 201 - } - }, { - "create" : { - "_index" : "elastic-test", - "_type" : "tweet", - "_id" : "2", - "_version" : 1, - "status" : 423, - "error" : { - "type":"routing_missing_exception", - "reason":"routing is required for [elastic-test2]/[comment]/[1]" - } - } - }, { - "delete" : { - "_index" : "elastic-test", - "_type" : "tweet", - "_id" : "1", - "_version" : 2, - "status" : 404, - "found" : false - } - }, { - "update" : { - "_index" : "elastic-test", - "_type" : "tweet", - "_id" : "2", - "_version" : 2, - "status" : 200 - } - } ] -}` - - var resp BulkResponse - err := json.Unmarshal([]byte(js), &resp) - if err != nil { - t.Fatal(err) - } - failed := resp.Failed() - if len(failed) != 2 { - t.Errorf("expected %d failed items; got: %d", 2, len(failed)) - } -} - -func TestBulkEstimatedSizeInBytes(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."} - - index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1) - index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2) - delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1") - update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2"). - Doc(struct { - Retweets int `json:"retweets"` - }{ - Retweets: 42, - }) - - bulkRequest := client.Bulk() - bulkRequest = bulkRequest.Add(index1Req) - bulkRequest = bulkRequest.Add(index2Req) - bulkRequest = bulkRequest.Add(delete1Req) - bulkRequest = bulkRequest.Add(update2Req) - - if bulkRequest.NumberOfActions() != 4 { - t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions()) - } - - // The estimated size of the bulk request in bytes must be at least - // the length of the body request. 
- raw, err := bulkRequest.bodyAsString() - if err != nil { - t.Fatal(err) - } - rawlen := int64(len([]byte(raw))) - - if got, want := bulkRequest.EstimatedSizeInBytes(), rawlen; got < want { - t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got) - } - - // Reset should also reset the calculated estimated byte size - bulkRequest.reset() - - if got, want := bulkRequest.EstimatedSizeInBytes(), int64(0); got != want { - t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request.go deleted file mode 100644 index 5adef7111..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "strings" -) - -// Bulk request to update document in Elasticsearch. -type BulkUpdateRequest struct { - BulkableRequest - index string - typ string - id string - - routing string - parent string - script *Script - version int64 // default is MATCH_ANY - versionType string // default is "internal" - retryOnConflict *int - refresh *bool - upsert interface{} - docAsUpsert *bool - doc interface{} - ttl int64 - timestamp string -} - -func NewBulkUpdateRequest() *BulkUpdateRequest { - return &BulkUpdateRequest{} -} - -func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest { - r.index = index - return r -} - -func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest { - r.typ = typ - return r -} - -func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest { - r.id = id - return r -} - -func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest { - r.routing = routing - return r -} - -func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest { - r.parent = parent - return r -} - -func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest { - r.script = script - return r -} - -func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest { - r.retryOnConflict = &retryOnConflict - return r -} - -func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest { - r.version = version - return r -} - -// VersionType can be "internal" (default), "external", "external_gte", -// "external_gt", or "force". 
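// A minimal sketch of external versioning on a bulk update (illustrative
// only; the index, type, and counter payload below are placeholders):
//
//	req := NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
//		Version(10).
//		VersionType("external").
//		Doc(struct {
//			Counter int64 `json:"counter"`
//		}{Counter: 42})
//	lines, err := req.Source() // command line, then the {"doc":...} payload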
-func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest { - r.versionType = versionType - return r -} - -func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest { - r.refresh = &refresh - return r -} - -func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest { - r.doc = doc - return r -} - -func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest { - r.docAsUpsert = &docAsUpsert - return r -} - -func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest { - r.upsert = doc - return r -} - -func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest { - r.ttl = ttl - return r -} - -func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest { - r.timestamp = timestamp - return r -} - -func (r *BulkUpdateRequest) String() string { - lines, err := r.Source() - if err == nil { - return strings.Join(lines, "\n") - } - return fmt.Sprintf("error: %v", err) -} - -func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) { - switch t := data.(type) { - default: - body, err := json.Marshal(data) - if err != nil { - return "", err - } - return string(body), nil - case json.RawMessage: - return string(t), nil - case *json.RawMessage: - return string(*t), nil - case string: - return t, nil - case *string: - return *t, nil - } -} - -func (r BulkUpdateRequest) Source() ([]string, error) { - // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } - // { "doc" : { "field1" : "value1", ... } } - // or - // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } - // { "script" : { ... } } - - lines := make([]string, 2) - - // "update" ... - command := make(map[string]interface{}) - updateCommand := make(map[string]interface{}) - if r.index != "" { - updateCommand["_index"] = r.index - } - if r.typ != "" { - updateCommand["_type"] = r.typ - } - if r.id != "" { - updateCommand["_id"] = r.id - } - if r.routing != "" { - updateCommand["_routing"] = r.routing - } - if r.parent != "" { - updateCommand["_parent"] = r.parent - } - if r.timestamp != "" { - updateCommand["_timestamp"] = r.timestamp - } - if r.ttl > 0 { - updateCommand["_ttl"] = r.ttl - } - if r.version > 0 { - updateCommand["_version"] = r.version - } - if r.versionType != "" { - updateCommand["_version_type"] = r.versionType - } - if r.refresh != nil { - updateCommand["refresh"] = *r.refresh - } - if r.retryOnConflict != nil { - updateCommand["_retry_on_conflict"] = *r.retryOnConflict - } - if r.upsert != nil { - updateCommand["upsert"] = r.upsert - } - command["update"] = updateCommand - line, err := json.Marshal(command) - if err != nil { - return nil, err - } - lines[0] = string(line) - - // 2nd line: {"doc" : { ... 
}} or {"script": {...}} - source := make(map[string]interface{}) - if r.docAsUpsert != nil { - source["doc_as_upsert"] = *r.docAsUpsert - } - if r.doc != nil { - // {"doc":{...}} - source["doc"] = r.doc - } else if r.script != nil { - // {"script":...} - src, err := r.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - lines[1], err = r.getSourceAsString(source) - if err != nil { - return nil, err - } - - return lines, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request_test.go deleted file mode 100644 index 75c5b6d7f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/bulk_update_request_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestBulkUpdateRequestSerialization(t *testing.T) { - tests := []struct { - Request BulkableRequest - Expected []string - }{ - // #0 - { - Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct { - Counter int64 `json:"counter"` - }{ - Counter: 42, - }), - Expected: []string{ - `{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`, - `{"doc":{"counter":42}}`, - }, - }, - // #1 - { - Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1"). - RetryOnConflict(3). - DocAsUpsert(true). - Doc(struct { - Counter int64 `json:"counter"` - }{ - Counter: 42, - }), - Expected: []string{ - `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`, - `{"doc":{"counter":42},"doc_as_upsert":true}`, - }, - }, - // #2 - { - Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1"). - RetryOnConflict(3). - Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)). - Upsert(struct { - Counter int64 `json:"counter"` - }{ - Counter: 42, - }), - Expected: []string{ - `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet","upsert":{"counter":42}}}`, - `{"script":{"inline":"ctx._source.retweets += param1","lang":"javascript","params":{"param1":42}}}`, - }, - }, - } - - for i, test := range tests { - lines, err := test.Request.Source() - if err != nil { - t.Fatalf("case #%d: expected no error, got: %v", i, err) - } - if lines == nil { - t.Fatalf("case #%d: expected lines, got nil", i) - } - if len(lines) != len(test.Expected) { - t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines)) - } - for j, line := range lines { - if line != test.Expected[j] { - t.Errorf("case #%d: expected line #%d to be\n%s\nbut got:\n%s", i, j, test.Expected[j], line) - } - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize.go b/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize.go deleted file mode 100644 index 645930859..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "net/url" - -// canonicalize takes a list of URLs and returns its canonicalized form, i.e. 
-// remove anything but scheme, userinfo, host, and port. It also removes the -// slash at the end. It also skips invalid URLs or URLs that do not use -// protocol http or https. -// -// Example: -// http://127.0.0.1:9200/path?query=1 -> http://127.0.0.1:9200 -func canonicalize(rawurls ...string) []string { - canonicalized := make([]string, 0) - for _, rawurl := range rawurls { - u, err := url.Parse(rawurl) - if err == nil && (u.Scheme == "http" || u.Scheme == "https") { - u.Fragment = "" - u.Path = "" - u.RawQuery = "" - canonicalized = append(canonicalized, u.String()) - } - } - return canonicalized -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize_test.go deleted file mode 100644 index ada2ff22d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/canonicalize_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "reflect" - "testing" -) - -func TestCanonicalize(t *testing.T) { - tests := []struct { - Input []string - Output []string - }{ - { - Input: []string{"http://127.0.0.1/"}, - Output: []string{"http://127.0.0.1"}, - }, - { - Input: []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"}, - Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"}, - }, - { - Input: []string{"http://user:secret@127.0.0.1/path?query=1#fragment"}, - Output: []string{"http://user:secret@127.0.0.1"}, - }, - { - Input: []string{"https://somewhere.on.mars:9999/path?query=1#fragment"}, - Output: []string{"https://somewhere.on.mars:9999"}, - }, - } - - for _, test := range tests { - got := canonicalize(test.Input...) - if !reflect.DeepEqual(got, test.Output) { - t.Errorf("expected %v; got: %v", test.Output, got) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll.go b/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll.go deleted file mode 100644 index c57093267..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" -) - -// ClearScrollService clears one or more scroll contexts by their ids. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api -// for details. -type ClearScrollService struct { - client *Client - pretty bool - scrollId []string -} - -// NewClearScrollService creates a new ClearScrollService. -func NewClearScrollService(client *Client) *ClearScrollService { - return &ClearScrollService{ - client: client, - scrollId: make([]string, 0), - } -} - -// ScrollId is a list of scroll IDs to clear. -// Use _all to clear all search contexts. -func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService { - s.scrollId = append(s.scrollId, scrollIds...) - return s -} - -// Pretty indicates that the JSON response be indented and human readable. 
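// A usage sketch for this service (illustrative; assumes an existing client
// and a scroll ID obtained from a previous Scroll call):
//
//	res, err := client.ClearScroll().ScrollId(scrollID).Do()
//	// or release every open search context at once:
//	res, err = client.ClearScroll().ScrollId("_all").Do()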
-func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *ClearScrollService) buildURL() (string, url.Values, error) { - // Build URL - path := "/_search/scroll/" - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ClearScrollService) Validate() error { - var invalid []string - if len(s.scrollId) == 0 { - invalid = append(invalid, "ScrollId") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *ClearScrollService) Do() (*ClearScrollResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - body := strings.Join(s.scrollId, ",") - - // Get HTTP response - res, err := s.client.PerformRequest("DELETE", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ClearScrollResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ClearScrollResponse is the response of ClearScrollService.Do. -type ClearScrollResponse struct { -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll_test.go deleted file mode 100644 index bbb659df9..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/clear_scroll_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - _ "net/http" - "testing" -) - -func TestClearScroll(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - res, err := client.Scroll(testIndexName).Size(1).Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Errorf("expected results != nil; got nil") - } - if res.ScrollId == "" { - t.Errorf("expected scrollId in results; got %q", res.ScrollId) - } - - // Search should succeed - _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do() - if err != nil { - t.Fatal(err) - } - - // Clear scroll id - clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do() - if err != nil { - t.Fatal(err) - } - if clearScrollRes == nil { - t.Error("expected results != nil; got nil") - } - - // Search result should fail - _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do() - if err == nil { - t.Fatalf("expected scroll to fail") - } -} - -func TestClearScrollValidate(t *testing.T) { - client := setupTestClient(t) - - // No scroll id -> fail with error - res, err := NewClearScrollService(client).Do() - if err == nil { - t.Fatalf("expected ClearScroll to fail without scroll ids") - } - if res != nil { - t.Fatalf("expected result to be nil; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/client.go b/services/templeton/vendor/src/github.com/olivere/elastic/client.go deleted file mode 100644 index 556d2867f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/client.go +++ /dev/null @@ -1,1551 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math/rand" - "net/http" - "net/http/httputil" - "net/url" - "regexp" - "strings" - "sync" - "time" -) - -const ( - // Version is the current version of Elastic. - Version = "3.0.21" - - // DefaultUrl is the default endpoint of Elasticsearch on the local machine. - // It is used e.g. when initializing a new Client without a specific URL. - DefaultURL = "http://127.0.0.1:9200" - - // DefaultScheme is the default protocol scheme to use when sniffing - // the Elasticsearch cluster. - DefaultScheme = "http" - - // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default. - DefaultHealthcheckEnabled = true - - // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits - // for a response from Elasticsearch on startup, i.e. when creating a - // client. After the client is started, a shorter timeout is commonly used - // (its default is specified in DefaultHealthcheckTimeout). 
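// A configuration sketch (illustrative; the URL and durations are
// placeholders) showing how the startup and steady-state healthcheck
// timeouts are tuned independently:
//
//	client, err := NewClient(
//		SetURL("http://127.0.0.1:9200"),
//		SetHealthcheckTimeoutStartup(10*time.Second),
//		SetHealthcheckTimeout(2*time.Second),
//		SetHealthcheckInterval(30*time.Second))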
- DefaultHealthcheckTimeoutStartup = 5 * time.Second - - // DefaultHealthcheckTimeout specifies the time a running client waits for - // a response from Elasticsearch. Notice that the healthcheck timeout - // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup). - DefaultHealthcheckTimeout = 1 * time.Second - - // DefaultHealthcheckInterval is the default interval between - // two health checks of the nodes in the cluster. - DefaultHealthcheckInterval = 60 * time.Second - - // DefaultSnifferEnabled specifies if the sniffer is enabled by default. - DefaultSnifferEnabled = true - - // DefaultSnifferInterval is the interval between two sniffing procedures, - // i.e. the lookup of all nodes in the cluster and their addition/removal - // from the list of actual connections. - DefaultSnifferInterval = 15 * time.Minute - - // DefaultSnifferTimeoutStartup is the default timeout for the sniffing - // process that is initiated while creating a new client. For subsequent - // sniffing processes, DefaultSnifferTimeout is used (by default). - DefaultSnifferTimeoutStartup = 5 * time.Second - - // DefaultSnifferTimeout is the default timeout after which the - // sniffing process times out. Notice that for the initial sniffing - // process, DefaultSnifferTimeoutStartup is used. - DefaultSnifferTimeout = 2 * time.Second - - // DefaultMaxRetries is the number of retries for a single request after - // Elastic will give up and return an error. It is zero by default, so - // retry is disabled by default. - DefaultMaxRetries = 0 - - // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending - // a GET request with a body. - DefaultSendGetBodyAs = "GET" - - // DefaultGzipEnabled specifies if gzip compression is enabled by default. - DefaultGzipEnabled = false - - // off is used to disable timeouts. - off = -1 * time.Second -) - -var ( - // ErrNoClient is raised when no Elasticsearch node is available. - ErrNoClient = errors.New("no Elasticsearch node available") - - // ErrRetry is raised when a request cannot be executed after the configured - // number of retries. - ErrRetry = errors.New("cannot connect after several retries") - - // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus - // didn't return in time. - ErrTimeout = errors.New("timeout") -) - -// ClientOptionFunc is a function that configures a Client. -// It is used in NewClient. -type ClientOptionFunc func(*Client) error - -// Client is an Elasticsearch client. Create one by calling NewClient. -type Client struct { - c *http.Client // net/http Client to use for requests - - connsMu sync.RWMutex // connsMu guards the next block - conns []*conn // all connections - cindex int // index into conns - - mu sync.RWMutex // guards the next block - urls []string // set of URLs passed initially to the client - running bool // true if the client's background processes are running - errorlog Logger // error log for critical messages - infolog Logger // information log for e.g. response times - tracelog Logger // trace log for debugging - maxRetries int // max. 
number of retries - scheme string // http or https - healthcheckEnabled bool // healthchecks enabled or disabled - healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup - healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch - healthcheckInterval time.Duration // interval between healthchecks - healthcheckStop chan bool // notify healthchecker to stop, and notify back - snifferEnabled bool // sniffer enabled or disabled - snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup - snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API - snifferInterval time.Duration // interval between sniffing - snifferStop chan bool // notify sniffer to stop, and notify back - decoder Decoder // used to decode data sent from Elasticsearch - basicAuth bool // indicates whether to send HTTP Basic Auth credentials - basicAuthUsername string // username for HTTP Basic Auth - basicAuthPassword string // password for HTTP Basic Auth - sendGetBodyAs string // override for when sending a GET with a body - requiredPlugins []string // list of required plugins - gzipEnabled bool // gzip compression enabled or disabled (default) -} - -// NewClient creates a new client to work with Elasticsearch. -// -// NewClient, by default, is meant to be long-lived and shared across -// your application. If you need a short-lived client, e.g. for request-scope, -// consider using NewSimpleClient instead. -// -// The caller can configure the new client by passing configuration options -// to the func. -// -// Example: -// -// client, err := elastic.NewClient( -// elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"), -// elastic.SetMaxRetries(10), -// elastic.SetBasicAuth("user", "secret")) -// -// If no URL is configured, Elastic uses DefaultURL by default. -// -// If the sniffer is enabled (the default), the new client then sniffes -// the cluster via the Nodes Info API -// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info). -// It uses the URLs specified by the caller. The caller is responsible -// to only pass a list of URLs of nodes that belong to the same cluster. -// This sniffing process is run on startup and periodically. -// Use SnifferInterval to set the interval between two sniffs (default is -// 15 minutes). In other words: By default, the client will find new nodes -// in the cluster and remove those that are no longer available every -// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient. -// -// The list of nodes found in the sniffing process will be used to make -// connections to the REST API of Elasticsearch. These nodes are also -// periodically checked in a shorter time frame. This process is called -// a health check. By default, a health check is done every 60 seconds. -// You can set a shorter or longer interval by SetHealthcheckInterval. -// Disabling health checks is not recommended, but can be done by -// SetHealthcheck(false). -// -// Connections are automatically marked as dead or healthy while -// making requests to Elasticsearch. When a request fails, Elastic will -// retry up to a maximum number of retries configured with SetMaxRetries. -// Retries are disabled by default. -// -// If no HttpClient is configured, then http.DefaultClient is used. 
-// You can use your own http.Client with some http.Transport for -// advanced scenarios. -// -// An error is also returned when some configuration option is invalid or -// the new client cannot sniff the cluster (if enabled). -func NewClient(options ...ClientOptionFunc) (*Client, error) { - // Set up the client - c := &Client{ - c: http.DefaultClient, - conns: make([]*conn, 0), - cindex: -1, - scheme: DefaultScheme, - decoder: &DefaultDecoder{}, - maxRetries: DefaultMaxRetries, - healthcheckEnabled: DefaultHealthcheckEnabled, - healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup, - healthcheckTimeout: DefaultHealthcheckTimeout, - healthcheckInterval: DefaultHealthcheckInterval, - healthcheckStop: make(chan bool), - snifferEnabled: DefaultSnifferEnabled, - snifferTimeoutStartup: DefaultSnifferTimeoutStartup, - snifferTimeout: DefaultSnifferTimeout, - snifferInterval: DefaultSnifferInterval, - snifferStop: make(chan bool), - sendGetBodyAs: DefaultSendGetBodyAs, - gzipEnabled: DefaultGzipEnabled, - } - - // Run the options on it - for _, option := range options { - if err := option(c); err != nil { - return nil, err - } - } - - if len(c.urls) == 0 { - c.urls = []string{DefaultURL} - } - c.urls = canonicalize(c.urls...) - - // Check if we can make a request to any of the specified URLs - if c.healthcheckEnabled { - if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil { - return nil, err - } - } - - if c.snifferEnabled { - // Sniff the cluster initially - if err := c.sniff(c.snifferTimeoutStartup); err != nil { - return nil, err - } - } else { - // Do not sniff the cluster initially. Use the provided URLs instead. - for _, url := range c.urls { - c.conns = append(c.conns, newConn(url, url)) - } - } - - if c.healthcheckEnabled { - // Perform an initial health check - c.healthcheck(c.healthcheckTimeoutStartup, true) - } - // Ensure that we have at least one connection available - if err := c.mustActiveConn(); err != nil { - return nil, err - } - - // Check the required plugins - for _, plugin := range c.requiredPlugins { - found, err := c.HasPlugin(plugin) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("elastic: plugin %s not found", plugin) - } - } - - if c.snifferEnabled { - go c.sniffer() // periodically update cluster information - } - if c.healthcheckEnabled { - go c.healthchecker() // start goroutine periodically ping all nodes of the cluster - } - - c.mu.Lock() - c.running = true - c.mu.Unlock() - - return c, nil -} - -// NewSimpleClient creates a new short-lived Client that can be used in -// use cases where you need e.g. one client per request. -// -// While NewClient by default sets up e.g. periodic health checks -// and sniffing for new nodes in separate goroutines, NewSimpleClient does -// not and is meant as a simple replacement where you don't need all the -// heavy lifting of NewClient. -// -// NewSimpleClient does the following by default: First, all health checks -// are disabled, including timeouts and periodic checks. Second, sniffing -// is disabled, including timeouts and periodic checks. The number of retries -// is set to 1. NewSimpleClient also does not start any goroutines. -// -// Notice that you can still override settings by passing additional options, -// just like with NewClient. 
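// A minimal sketch (illustrative; the URL and index name are placeholders)
// of a request-scoped client that starts no background goroutines:
//
//	client, err := NewSimpleClient(SetURL("http://127.0.0.1:9200"))
//	if err != nil {
//		// handle error
//	}
//	count, err := client.Count("twitter").Do() // use like any other Client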
-func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) { - c := &Client{ - c: http.DefaultClient, - conns: make([]*conn, 0), - cindex: -1, - scheme: DefaultScheme, - decoder: &DefaultDecoder{}, - maxRetries: 1, - healthcheckEnabled: false, - healthcheckTimeoutStartup: off, - healthcheckTimeout: off, - healthcheckInterval: off, - healthcheckStop: make(chan bool), - snifferEnabled: false, - snifferTimeoutStartup: off, - snifferTimeout: off, - snifferInterval: off, - snifferStop: make(chan bool), - sendGetBodyAs: DefaultSendGetBodyAs, - gzipEnabled: DefaultGzipEnabled, - } - - // Run the options on it - for _, option := range options { - if err := option(c); err != nil { - return nil, err - } - } - - if len(c.urls) == 0 { - c.urls = []string{DefaultURL} - } - c.urls = canonicalize(c.urls...) - - for _, url := range c.urls { - c.conns = append(c.conns, newConn(url, url)) - } - - // Ensure that we have at least one connection available - if err := c.mustActiveConn(); err != nil { - return nil, err - } - - // Check the required plugins - for _, plugin := range c.requiredPlugins { - found, err := c.HasPlugin(plugin) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("elastic: plugin %s not found", plugin) - } - } - - c.mu.Lock() - c.running = true - c.mu.Unlock() - - return c, nil -} - -// SetHttpClient can be used to specify the http.Client to use when making -// HTTP requests to Elasticsearch. -func SetHttpClient(httpClient *http.Client) ClientOptionFunc { - return func(c *Client) error { - if httpClient != nil { - c.c = httpClient - } else { - c.c = http.DefaultClient - } - return nil - } -} - -// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to -// use when making HTTP requests to Elasticsearch. -func SetBasicAuth(username, password string) ClientOptionFunc { - return func(c *Client) error { - c.basicAuthUsername = username - c.basicAuthPassword = password - c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != "" - return nil - } -} - -// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that -// when sniffing is enabled, these URLs are used to initially sniff the -// cluster on startup. -func SetURL(urls ...string) ClientOptionFunc { - return func(c *Client) error { - switch len(urls) { - case 0: - c.urls = []string{DefaultURL} - default: - c.urls = urls - } - return nil - } -} - -// SetScheme sets the HTTP scheme to look for when sniffing (http or https). -// This is http by default. -func SetScheme(scheme string) ClientOptionFunc { - return func(c *Client) error { - c.scheme = scheme - return nil - } -} - -// SetSniff enables or disables the sniffer (enabled by default). -func SetSniff(enabled bool) ClientOptionFunc { - return func(c *Client) error { - c.snifferEnabled = enabled - return nil - } -} - -// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used -// when creating a new client. The default is 5 seconds. Notice that the -// timeout being used for subsequent sniffing processes is set with -// SetSnifferTimeout. -func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.snifferTimeoutStartup = timeout - return nil - } -} - -// SetSnifferTimeout sets the timeout for the sniffer that finds the -// nodes in a cluster. The default is 2 seconds. Notice that the timeout -// used when creating a new client on startup is usually greater and can -// be set with SetSnifferTimeoutStartup. 
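// A configuration sketch (illustrative; the durations are arbitrary)
// combining the sniffer options defined in this file:
//
//	client, err := NewClient(
//		SetSniff(true),
//		SetSnifferTimeoutStartup(10*time.Second),
//		SetSnifferTimeout(3*time.Second),
//		SetSnifferInterval(5*time.Minute))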
-func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.snifferTimeout = timeout - return nil - } -} - -// SetSnifferInterval sets the interval between two sniffing processes. -// The default interval is 15 minutes. -func SetSnifferInterval(interval time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.snifferInterval = interval - return nil - } -} - -// SetHealthcheck enables or disables healthchecks (enabled by default). -func SetHealthcheck(enabled bool) ClientOptionFunc { - return func(c *Client) error { - c.healthcheckEnabled = enabled - return nil - } -} - -// SetHealthcheckTimeoutStartup sets the timeout for the initial health check. -// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup). -// Notice that timeouts for subsequent health checks can be modified with -// SetHealthcheckTimeout. -func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.healthcheckTimeoutStartup = timeout - return nil - } -} - -// SetHealthcheckTimeout sets the timeout for periodic health checks. -// The default timeout is 1 second (see DefaultHealthcheckTimeout). -// Notice that a different (usually larger) timeout is used for the initial -// healthcheck, which is initiated while creating a new client. -// The startup timeout can be modified with SetHealthcheckTimeoutStartup. -func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.healthcheckTimeout = timeout - return nil - } -} - -// SetHealthcheckInterval sets the interval between two health checks. -// The default interval is 60 seconds. -func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.healthcheckInterval = interval - return nil - } -} - -// SetMaxRetries sets the maximum number of retries before giving up when -// performing a HTTP request to Elasticsearch. -func SetMaxRetries(maxRetries int) ClientOptionFunc { - return func(c *Client) error { - if maxRetries < 0 { - return errors.New("MaxRetries must be greater than or equal to 0") - } - c.maxRetries = maxRetries - return nil - } -} - -// SetGzip enables or disables gzip compression (disabled by default). -func SetGzip(enabled bool) ClientOptionFunc { - return func(c *Client) error { - c.gzipEnabled = enabled - return nil - } -} - -// SetDecoder sets the Decoder to use when decoding data from Elasticsearch. -// DefaultDecoder is used by default. -func SetDecoder(decoder Decoder) ClientOptionFunc { - return func(c *Client) error { - if decoder != nil { - c.decoder = decoder - } else { - c.decoder = &DefaultDecoder{} - } - return nil - } -} - -// SetRequiredPlugins can be used to indicate that some plugins are required -// before a Client will be created. -func SetRequiredPlugins(plugins ...string) ClientOptionFunc { - return func(c *Client) error { - if c.requiredPlugins == nil { - c.requiredPlugins = make([]string, 0) - } - c.requiredPlugins = append(c.requiredPlugins, plugins...) - return nil - } -} - -// SetErrorLog sets the logger for critical messages like nodes joining -// or leaving the cluster or failing requests. It is nil by default. -func SetErrorLog(logger Logger) ClientOptionFunc { - return func(c *Client) error { - c.errorlog = logger - return nil - } -} - -// SetInfoLog sets the logger for informational messages, e.g. requests -// and their response times. It is nil by default. 
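// A wiring sketch (illustrative; assumes this package's Logger interface is
// satisfied by anything with a Printf method, as *log.Logger is):
//
//	errlog := log.New(os.Stderr, "ELASTIC ", log.LstdFlags)
//	tracelog := log.New(os.Stdout, "", log.LstdFlags)
//	client, err := NewClient(SetErrorLog(errlog), SetTraceLog(tracelog))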
-func SetInfoLog(logger Logger) ClientOptionFunc { - return func(c *Client) error { - c.infolog = logger - return nil - } -} - -// SetTraceLog specifies the log.Logger to use for output of HTTP requests -// and responses which is helpful during debugging. It is nil by default. -func SetTraceLog(logger Logger) ClientOptionFunc { - return func(c *Client) error { - c.tracelog = logger - return nil - } -} - -// SendGetBodyAs specifies the HTTP method to use when sending a GET request -// with a body. It is GET by default. -func SetSendGetBodyAs(httpMethod string) ClientOptionFunc { - return func(c *Client) error { - c.sendGetBodyAs = httpMethod - return nil - } -} - -// String returns a string representation of the client status. -func (c *Client) String() string { - c.connsMu.Lock() - conns := c.conns - c.connsMu.Unlock() - - var buf bytes.Buffer - for i, conn := range conns { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(conn.String()) - } - return buf.String() -} - -// IsRunning returns true if the background processes of the client are -// running, false otherwise. -func (c *Client) IsRunning() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.running -} - -// Start starts the background processes like sniffing the cluster and -// periodic health checks. You don't need to run Start when creating a -// client with NewClient; the background processes are run by default. -// -// If the background processes are already running, this is a no-op. -func (c *Client) Start() { - c.mu.RLock() - if c.running { - c.mu.RUnlock() - return - } - c.mu.RUnlock() - - if c.snifferEnabled { - go c.sniffer() - } - if c.healthcheckEnabled { - go c.healthchecker() - } - - c.mu.Lock() - c.running = true - c.mu.Unlock() - - c.infof("elastic: client started") -} - -// Stop stops the background processes that the client is running, -// i.e. sniffing the cluster periodically and running health checks -// on the nodes. -// -// If the background processes are not running, this is a no-op. -func (c *Client) Stop() { - c.mu.RLock() - if !c.running { - c.mu.RUnlock() - return - } - c.mu.RUnlock() - - if c.healthcheckEnabled { - c.healthcheckStop <- true - <-c.healthcheckStop - } - - if c.snifferEnabled { - c.snifferStop <- true - <-c.snifferStop - } - - c.mu.Lock() - c.running = false - c.mu.Unlock() - - c.infof("elastic: client stopped") -} - -// errorf logs to the error log. -func (c *Client) errorf(format string, args ...interface{}) { - if c.errorlog != nil { - c.errorlog.Printf(format, args...) - } -} - -// infof logs informational messages. -func (c *Client) infof(format string, args ...interface{}) { - if c.infolog != nil { - c.infolog.Printf(format, args...) - } -} - -// tracef logs to the trace log. -func (c *Client) tracef(format string, args ...interface{}) { - if c.tracelog != nil { - c.tracelog.Printf(format, args...) - } -} - -// dumpRequest dumps the given HTTP request to the trace log. -func (c *Client) dumpRequest(r *http.Request) { - if c.tracelog != nil { - out, err := httputil.DumpRequestOut(r, true) - if err == nil { - c.tracef("%s\n", string(out)) - } - } -} - -// dumpResponse dumps the given HTTP response to the trace log. -func (c *Client) dumpResponse(resp *http.Response) { - if c.tracelog != nil { - out, err := httputil.DumpResponse(resp, true) - if err == nil { - c.tracef("%s\n", string(out)) - } - } -} - -// sniffer periodically runs sniff. 
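// (The loop below shares a handshake convention with healthchecker: a value
// received on snifferStop is the stop request, and sending on the same
// channel afterwards acknowledges shutdown back to Stop.)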
-func (c *Client) sniffer() { - for { - c.mu.RLock() - timeout := c.snifferTimeout - ticker := time.After(c.snifferInterval) - c.mu.RUnlock() - - select { - case <-c.snifferStop: - // we are asked to stop, so we signal back that we're stopping now - c.snifferStop <- true - return - case <-ticker: - c.sniff(timeout) - } - } -} - -// sniff uses the Node Info API to return the list of nodes in the cluster. -// It uses the list of URLs passed on startup plus the list of URLs found -// by the preceding sniffing process (if sniffing is enabled). -// -// If sniffing is disabled, this is a no-op. -func (c *Client) sniff(timeout time.Duration) error { - c.mu.RLock() - if !c.snifferEnabled { - c.mu.RUnlock() - return nil - } - - // Use all available URLs provided to sniff the cluster. - urlsMap := make(map[string]bool) - urls := make([]string, 0) - - // Add all URLs provided on startup - for _, url := range c.urls { - urlsMap[url] = true - urls = append(urls, url) - } - c.mu.RUnlock() - - // Add all URLs found by sniffing - c.connsMu.RLock() - for _, conn := range c.conns { - if !conn.IsDead() { - url := conn.URL() - if _, found := urlsMap[url]; !found { - urls = append(urls, url) - } - } - } - c.connsMu.RUnlock() - - if len(urls) == 0 { - return ErrNoClient - } - - // Start sniffing on all found URLs - ch := make(chan []*conn, len(urls)) - for _, url := range urls { - go func(url string) { ch <- c.sniffNode(url) }(url) - } - - // Wait for the results to come back, or the process times out. - for { - select { - case conns := <-ch: - if len(conns) > 0 { - c.updateConns(conns) - return nil - } - case <-time.After(timeout): - // We get here if no cluster responds in time - return ErrNoClient - } - } -} - -// reSniffHostAndPort is used to extract hostname and port from a result -// from a Nodes Info API (example: "inet[/127.0.0.1:9200]"). -var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`) - -// sniffNode sniffs a single node. This method is run as a goroutine -// in sniff. If successful, it returns the list of node URLs extracted -// from the result of calling Nodes Info API. Otherwise, an empty array -// is returned. 
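// For example, applying reSniffHostAndPort to the 1.x-style address
// "inet[/127.0.0.1:9200]" yields m[1] == "127.0.0.1" and m[2] == "9200",
// from which the node URL "http://127.0.0.1:9200" is rebuilt below.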
-func (c *Client) sniffNode(url string) []*conn { - nodes := make([]*conn, 0) - - // Call the Nodes Info API at /_nodes/http - req, err := NewRequest("GET", url+"/_nodes/http") - if err != nil { - return nodes - } - - c.mu.RLock() - if c.basicAuth { - req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword) - } - c.mu.RUnlock() - - res, err := c.c.Do((*http.Request)(req)) - if err != nil { - return nodes - } - if res == nil { - return nodes - } - - if res.Body != nil { - defer res.Body.Close() - } - - var info NodesInfoResponse - if err := json.NewDecoder(res.Body).Decode(&info); err == nil { - if len(info.Nodes) > 0 { - switch c.scheme { - case "https": - for nodeID, node := range info.Nodes { - if strings.HasPrefix(node.HTTPSAddress, "inet") { - m := reSniffHostAndPort.FindStringSubmatch(node.HTTPSAddress) - if len(m) == 3 { - url := fmt.Sprintf("https://%s:%s", m[1], m[2]) - nodes = append(nodes, newConn(nodeID, url)) - } - } else { - url := fmt.Sprintf("https://%s", node.HTTPSAddress) - nodes = append(nodes, newConn(nodeID, url)) - } - } - default: - for nodeID, node := range info.Nodes { - if strings.HasPrefix(node.HTTPAddress, "inet") { - m := reSniffHostAndPort.FindStringSubmatch(node.HTTPAddress) - if len(m) == 3 { - url := fmt.Sprintf("http://%s:%s", m[1], m[2]) - nodes = append(nodes, newConn(nodeID, url)) - } - } else { - url := fmt.Sprintf("http://%s", node.HTTPAddress) - nodes = append(nodes, newConn(nodeID, url)) - } - } - } - } - } - return nodes -} - -// updateConns updates the clients' connections with new information -// gather by a sniff operation. -func (c *Client) updateConns(conns []*conn) { - c.connsMu.Lock() - - newConns := make([]*conn, 0) - - // Build up new connections: - // If we find an existing connection, use that (including no. of failures etc.). - // If we find a new connection, add it. - for _, conn := range conns { - var found bool - for _, oldConn := range c.conns { - if oldConn.NodeID() == conn.NodeID() { - // Take over the old connection - newConns = append(newConns, oldConn) - found = true - break - } - } - if !found { - // New connection didn't exist, so add it to our list of new conns. - c.errorf("elastic: %s joined the cluster", conn.URL()) - newConns = append(newConns, conn) - } - } - - c.conns = newConns - c.cindex = -1 - c.connsMu.Unlock() -} - -// healthchecker periodically runs healthcheck. -func (c *Client) healthchecker() { - for { - c.mu.RLock() - timeout := c.healthcheckTimeout - ticker := time.After(c.healthcheckInterval) - c.mu.RUnlock() - - select { - case <-c.healthcheckStop: - // we are asked to stop, so we signal back that we're stopping now - c.healthcheckStop <- true - return - case <-ticker: - c.healthcheck(timeout, false) - } - } -} - -// healthcheck does a health check on all nodes in the cluster. Depending on -// the node state, it marks connections as dead, sets them alive etc. -// If healthchecks are disabled and force is false, this is a no-op. -// The timeout specifies how long to wait for a response from Elasticsearch. 
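// (Concretely: a 1s timeout becomes the query parameter "timeout=1000ms" on
// a HEAD request against each node's base URL; any 2xx response marks the
// connection alive, and anything else marks it dead.)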
-func (c *Client) healthcheck(timeout time.Duration, force bool) { - c.mu.RLock() - if !c.healthcheckEnabled && !force { - c.mu.RUnlock() - return - } - basicAuth := c.basicAuth - basicAuthUsername := c.basicAuthUsername - basicAuthPassword := c.basicAuthPassword - c.mu.RUnlock() - - c.connsMu.RLock() - conns := c.conns - c.connsMu.RUnlock() - - timeoutInMillis := int64(timeout / time.Millisecond) - - for _, conn := range conns { - params := make(url.Values) - params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis)) - req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode()) - if err == nil { - if basicAuth { - req.SetBasicAuth(basicAuthUsername, basicAuthPassword) - } - res, err := c.c.Do((*http.Request)(req)) - if err == nil { - if res.Body != nil { - defer res.Body.Close() - } - if res.StatusCode >= 200 && res.StatusCode < 300 { - conn.MarkAsAlive() - } else { - conn.MarkAsDead() - c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode) - } - } else { - c.errorf("elastic: %s is dead", conn.URL()) - conn.MarkAsDead() - } - } else { - c.errorf("elastic: %s is dead", conn.URL()) - conn.MarkAsDead() - } - } -} - -// startupHealthcheck is used at startup to check if the server is available -// at all. -func (c *Client) startupHealthcheck(timeout time.Duration) error { - c.mu.Lock() - urls := c.urls - basicAuth := c.basicAuth - basicAuthUsername := c.basicAuthUsername - basicAuthPassword := c.basicAuthPassword - c.mu.Unlock() - - // If we don't get a connection after "timeout", we bail. - start := time.Now() - for { - cl := &http.Client{Timeout: timeout} - for _, url := range urls { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return err - } - if basicAuth { - req.SetBasicAuth(basicAuthUsername, basicAuthPassword) - } - res, err := cl.Do(req) - if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 { - return nil - } - } - time.Sleep(1 * time.Second) - if time.Now().Sub(start) > timeout { - break - } - } - return ErrNoClient -} - -// next returns the next available connection, or ErrNoClient. -func (c *Client) next() (*conn, error) { - // We do round-robin here. - // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients. - c.connsMu.Lock() - defer c.connsMu.Unlock() - - i := 0 - numConns := len(c.conns) - for { - i += 1 - if i > numConns { - break // we visited all conns: they all seem to be dead - } - c.cindex += 1 - if c.cindex >= numConns { - c.cindex = 0 - } - conn := c.conns[c.cindex] - if !conn.IsDead() { - return conn, nil - } - } - - // We have a deadlock here: All nodes are marked as dead. - // If sniffing is disabled, connections will never be marked alive again. - // So we are marking them as alive--if sniffing is disabled. - // They'll then be picked up in the next call to PerformRequest. - if !c.snifferEnabled { - c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns)) - for _, conn := range c.conns { - conn.MarkAsAlive() - } - } - - // We tried hard, but there is no node available - return nil, ErrNoClient -} - -// mustActiveConn returns nil if there is an active connection, -// otherwise ErrNoClient is returned. -func (c *Client) mustActiveConn() error { - c.connsMu.Lock() - defer c.connsMu.Unlock() - - for _, c := range c.conns { - if !c.IsDead() { - return nil - } - } - return ErrNoClient -} - -// PerformRequest does a HTTP request to Elasticsearch. -// It returns a response and an error on failure. 
-//
-// Optionally, a list of HTTP error codes to ignore can be passed.
-// This is necessary for services that expect e.g. HTTP status 404 as a
-// valid outcome (Exists, IndicesExists, IndicesTypeExists).
-func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) {
- start := time.Now().UTC()
-
- c.mu.RLock()
- timeout := c.healthcheckTimeout
- retries := c.maxRetries
- basicAuth := c.basicAuth
- basicAuthUsername := c.basicAuthUsername
- basicAuthPassword := c.basicAuthPassword
- sendGetBodyAs := c.sendGetBodyAs
- gzipEnabled := c.gzipEnabled
- c.mu.RUnlock()
-
- var err error
- var conn *conn
- var req *Request
- var resp *Response
- var retried bool
-
- // We wait between retries, using simple exponential back-off.
- // TODO: Make this configurable, including the jitter.
- retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
-
- // Change method if sendGetBodyAs is specified.
- if method == "GET" && body != nil && sendGetBodyAs != "GET" {
- method = sendGetBodyAs
- }
-
- for {
- pathWithParams := path
- if len(params) > 0 {
- pathWithParams += "?" + params.Encode()
- }
-
- // Get a connection
- conn, err = c.next()
- if err == ErrNoClient {
- if !retried {
- // Force a healthcheck as all connections seem to be dead.
- c.healthcheck(timeout, true)
- }
- retries -= 1
- if retries <= 0 {
- return nil, err
- }
- retried = true
- time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
- retryWaitMsec += retryWaitMsec
- continue // try again
- }
- if err != nil {
- c.errorf("elastic: cannot get connection from pool")
- return nil, err
- }
-
- req, err = NewRequest(method, conn.URL()+pathWithParams)
- if err != nil {
- c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
- return nil, err
- }
-
- if basicAuth {
- req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
- }
-
- // Set body
- if body != nil {
- err = req.SetBody(body, gzipEnabled)
- if err != nil {
- c.errorf("elastic: couldn't set body %+v for request: %v", body, err)
- return nil, err
- }
- }
-
- // Tracing
- c.dumpRequest((*http.Request)(req))
-
- // Get response
- res, err := c.c.Do((*http.Request)(req))
- if err != nil {
- retries -= 1
- if retries <= 0 {
- c.errorf("elastic: %s is dead", conn.URL())
- conn.MarkAsDead()
- return nil, err
- }
- retried = true
- time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
- retryWaitMsec += retryWaitMsec
- continue // try again
- }
- if res.Body != nil {
- defer res.Body.Close()
- }
-
- // Check for errors
- if err := checkResponse((*http.Request)(req), res, ignoreErrors...); err != nil {
- // No retry if request succeeded
- return nil, err
- }
-
- // Tracing
- c.dumpResponse(res)
-
- // We successfully made a request with this connection
- conn.MarkAsHealthy()
-
- resp, err = c.newResponse(res)
- if err != nil {
- return nil, err
- }
-
- break
- }
-
- duration := time.Now().UTC().Sub(start)
- c.infof("%s %s [status:%d, request:%.3fs]",
- strings.ToUpper(method),
- req.URL,
- resp.StatusCode,
- float64(int64(duration/time.Millisecond))/1000)
-
- return resp, nil
-}
-
-// -- Document APIs --
-
-// Index a document.
-func (c *Client) Index() *IndexService {
- return NewIndexService(c)
-}
-
-// Get a document.
-func (c *Client) Get() *GetService {
- return NewGetService(c)
-}
-
-// MultiGet retrieves multiple documents in one roundtrip.
-func (c *Client) MultiGet() *MgetService { - return NewMgetService(c) -} - -// Mget retrieves multiple documents in one roundtrip. -func (c *Client) Mget() *MgetService { - return NewMgetService(c) -} - -// Delete a document. -func (c *Client) Delete() *DeleteService { - return NewDeleteService(c) -} - -// DeleteByQuery deletes documents as found by a query. -func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService { - return NewDeleteByQueryService(c).Index(indices...) -} - -// Update a document. -func (c *Client) Update() *UpdateService { - return NewUpdateService(c) -} - -// Bulk is the entry point to mass insert/update/delete documents. -func (c *Client) Bulk() *BulkService { - return NewBulkService(c) -} - -// BulkProcessor allows setting up a concurrent processor of bulk requests. -func (c *Client) BulkProcessor() *BulkProcessorService { - return NewBulkProcessorService(c) -} - -// TODO Term Vectors -// TODO Multi termvectors API - -// -- Search APIs -- - -// Search is the entry point for searches. -func (c *Client) Search(indices ...string) *SearchService { - return NewSearchService(c).Index(indices...) -} - -// Suggest returns a service to return suggestions. -func (c *Client) Suggest(indices ...string) *SuggestService { - return NewSuggestService(c).Index(indices...) -} - -// MultiSearch is the entry point for multi searches. -func (c *Client) MultiSearch() *MultiSearchService { - return NewMultiSearchService(c) -} - -// Count documents. -func (c *Client) Count(indices ...string) *CountService { - return NewCountService(c).Index(indices...) -} - -// Explain computes a score explanation for a query and a specific document. -func (c *Client) Explain(index, typ, id string) *ExplainService { - return NewExplainService(c).Index(index).Type(typ).Id(id) -} - -// Percolate allows to send a document and return matching queries. -// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html. -func (c *Client) Percolate() *PercolateService { - return NewPercolateService(c) -} - -// TODO Search Template -// TODO Search Shards API -// TODO Search Exists API -// TODO Validate API -// TODO Field Stats API - -// Exists checks if a document exists. -func (c *Client) Exists() *ExistsService { - return NewExistsService(c) -} - -// Scan through documents. Use this to iterate inside a server process -// where the results will be processed without returning them to a client. -func (c *Client) Scan(indices ...string) *ScanService { - return NewScanService(c).Index(indices...) -} - -// Scroll through documents. Use this to efficiently scroll through results -// while returning the results to a client. Use Scan when you don't need -// to return requests to a client (i.e. not paginating via request/response). -func (c *Client) Scroll(indices ...string) *ScrollService { - return NewScrollService(c).Index(indices...) -} - -// ClearScroll can be used to clear search contexts manually. -func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService { - return NewClearScrollService(c).ScrollId(scrollIds...) -} - -// -- Indices APIs -- - -// CreateIndex returns a service to create a new index. -func (c *Client) CreateIndex(name string) *IndicesCreateService { - return NewIndicesCreateService(c).Index(name) -} - -// DeleteIndex returns a service to delete an index. -func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService { - return NewIndicesDeleteService(c).Index(indices) -} - -// IndexExists allows to check if an index exists. 
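// Editor's note: a typical call, mirrored from the cluster-test tool later in
// this patch ("twitter" is an assumed index name for the example):
//
//	exists, err := client.IndexExists("twitter").Do()
//	if err != nil {
//		// handle error
//	}
//	if !exists {
//		// index is missing; create it before indexing documents
//	}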
-func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
- return NewIndicesExistsService(c).Index(indices)
-}
-
-// TypeExists allows to check if one or more types exist in one or more indices.
-func (c *Client) TypeExists() *IndicesExistsTypeService {
- return NewIndicesExistsTypeService(c)
-}
-
-// IndexStats provides statistics on different operations happening
-// in one or more indices.
-func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
- return NewIndicesStatsService(c).Index(indices...)
-}
-
-// OpenIndex opens an index.
-func (c *Client) OpenIndex(name string) *IndicesOpenService {
- return NewIndicesOpenService(c).Index(name)
-}
-
-// CloseIndex closes an index.
-func (c *Client) CloseIndex(name string) *IndicesCloseService {
- return NewIndicesCloseService(c).Index(name)
-}
-
-// IndexGet retrieves information about one or more indices.
-// IndexGet is only available for Elasticsearch 1.4 or later.
-func (c *Client) IndexGet(indices ...string) *IndicesGetService {
- return NewIndicesGetService(c).Index(indices...)
-}
-
-// IndexGetSettings retrieves settings of all, one or more indices.
-func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService {
- return NewIndicesGetSettingsService(c).Index(indices...)
-}
-
-// IndexPutSettings sets settings for all, one or more indices.
-func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService {
- return NewIndicesPutSettingsService(c).Index(indices...)
-}
-
-// Optimize asks Elasticsearch to optimize one or more indices.
-// Optimize is deprecated as of Elasticsearch 2.1 and replaced by Forcemerge.
-func (c *Client) Optimize(indices ...string) *OptimizeService {
- return NewOptimizeService(c).Index(indices...)
-}
-
-// Forcemerge optimizes one or more indices.
-// It replaces the deprecated Optimize API.
-func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService {
- return NewIndicesForcemergeService(c).Index(indices...)
-}
-
-// Refresh asks Elasticsearch to refresh one or more indices.
-func (c *Client) Refresh(indices ...string) *RefreshService {
- return NewRefreshService(c).Index(indices...)
-}
-
-// Flush asks Elasticsearch to free memory from the index and
-// flush data to disk.
-func (c *Client) Flush(indices ...string) *IndicesFlushService {
- return NewIndicesFlushService(c).Index(indices...)
-}
-
-// Alias enables the caller to add and/or remove aliases.
-func (c *Client) Alias() *AliasService {
- return NewAliasService(c)
-}
-
-// Aliases returns aliases by index name(s).
-func (c *Client) Aliases() *AliasesService {
- return NewAliasesService(c)
-}
-
-// GetTemplate gets a search template.
-// Use IndexXXXTemplate funcs to manage index templates.
-func (c *Client) GetTemplate() *GetTemplateService {
- return NewGetTemplateService(c)
-}
-
-// PutTemplate creates or updates a search template.
-// Use IndexXXXTemplate funcs to manage index templates.
-func (c *Client) PutTemplate() *PutTemplateService {
- return NewPutTemplateService(c)
-}
-
-// DeleteTemplate deletes a search template.
-// Use IndexXXXTemplate funcs to manage index templates.
-func (c *Client) DeleteTemplate() *DeleteTemplateService {
- return NewDeleteTemplateService(c)
-}
-
-// IndexGetTemplate gets an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
- return NewIndicesGetTemplateService(c).Name(names...)
-}
-
-// IndexTemplateExists checks if an index template exists.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
- return NewIndicesExistsTemplateService(c).Name(name)
-}
-
-// IndexPutTemplate creates or updates an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
- return NewIndicesPutTemplateService(c).Name(name)
-}
-
-// IndexDeleteTemplate deletes an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
- return NewIndicesDeleteTemplateService(c).Name(name)
-}
-
-// GetMapping gets a mapping.
-func (c *Client) GetMapping() *IndicesGetMappingService {
- return NewIndicesGetMappingService(c)
-}
-
-// PutMapping registers a mapping.
-func (c *Client) PutMapping() *IndicesPutMappingService {
- return NewIndicesPutMappingService(c)
-}
-
-// GetWarmer gets one or more warmers by name.
-func (c *Client) GetWarmer() *IndicesGetWarmerService {
- return NewIndicesGetWarmerService(c)
-}
-
-// PutWarmer registers a warmer.
-func (c *Client) PutWarmer() *IndicesPutWarmerService {
- return NewIndicesPutWarmerService(c)
-}
-
-// DeleteWarmer deletes one or more warmers.
-func (c *Client) DeleteWarmer() *IndicesDeleteWarmerService {
- return NewIndicesDeleteWarmerService(c)
-}
-
-// -- cat APIs --
-
-// TODO cat aliases
-// TODO cat allocation
-// TODO cat count
-// TODO cat fielddata
-// TODO cat health
-// TODO cat indices
-// TODO cat master
-// TODO cat nodes
-// TODO cat pending tasks
-// TODO cat plugins
-// TODO cat recovery
-// TODO cat thread pool
-// TODO cat shards
-// TODO cat segments
-
-// -- Cluster APIs --
-
-// ClusterHealth retrieves the health of the cluster.
-func (c *Client) ClusterHealth() *ClusterHealthService {
- return NewClusterHealthService(c)
-}
-
-// ClusterState retrieves the state of the cluster.
-func (c *Client) ClusterState() *ClusterStateService {
- return NewClusterStateService(c)
-}
-
-// ClusterStats retrieves cluster statistics.
-func (c *Client) ClusterStats() *ClusterStatsService {
- return NewClusterStatsService(c)
-}
-
-// NodesInfo retrieves information about one or more nodes in the cluster.
-func (c *Client) NodesInfo() *NodesInfoService {
- return NewNodesInfoService(c)
-}
-
-// TODO Pending cluster tasks
-// TODO Cluster Reroute
-// TODO Cluster Update Settings
-// TODO Nodes Stats
-// TODO Nodes hot_threads
-
-// -- Snapshot and Restore --
-
-// TODO Snapshot Create
-// TODO Snapshot Create Repository
-// TODO Snapshot Delete
-// TODO Snapshot Delete Repository
-// TODO Snapshot Get
-// TODO Snapshot Get Repository
-// TODO Snapshot Restore
-// TODO Snapshot Status
-// TODO Snapshot Verify Repository
-
-// -- Helpers and shortcuts --
-
-// ElasticsearchVersion returns the version number of Elasticsearch
-// running on the given URL.
-func (c *Client) ElasticsearchVersion(url string) (string, error) {
- res, _, err := c.Ping(url).Do()
- if err != nil {
- return "", err
- }
- return res.Version.Number, nil
-}
-
-// IndexNames returns the names of all indices in the cluster.
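// Editor's note: a short usage sketch (which names come back depends on the
// cluster, of course):
//
//	names, err := client.IndexNames()
//	if err != nil {
//		// handle error
//	}
//	for _, name := range names {
//		fmt.Println(name)
//	}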
-func (c *Client) IndexNames() ([]string, error) {
- res, err := c.IndexGetSettings().Index("_all").Do()
- if err != nil {
- return nil, err
- }
- var names []string
- for name := range res {
- names = append(names, name)
- }
- return names, nil
-}
-
-// Ping checks if a given node in a cluster exists and (optionally)
-// returns some basic information about the Elasticsearch server,
-// e.g. the Elasticsearch version number.
-//
-// Notice that you need to specify a URL here explicitly.
-func (c *Client) Ping(url string) *PingService {
- return NewPingService(c).URL(url)
-}
-
-// Reindex returns a service that will reindex documents from a source
-// index into a target index. See
-// http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
-// for more information about reindexing.
-func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
- return NewReindexer(c, sourceIndex, CopyToTargetIndex(targetIndex))
-}
-
-// WaitForStatus waits for the cluster to have the given status.
-// This is a shortcut method for the ClusterHealth service.
-//
-// WaitForStatus waits up to the specified timeout, e.g. "10s".
-// If the cluster reaches the given status within the timeout, nil is returned.
-// If the request timed out, ErrTimeout is returned.
-func (c *Client) WaitForStatus(status string, timeout string) error {
- health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do()
- if err != nil {
- return err
- }
- if health.TimedOut {
- return ErrTimeout
- }
- return nil
-}
-
-// WaitForGreenStatus waits for the cluster to have the "green" status.
-// See WaitForStatus for more details.
-func (c *Client) WaitForGreenStatus(timeout string) error {
- return c.WaitForStatus("green", timeout)
-}
-
-// WaitForYellowStatus waits for the cluster to have the "yellow" status.
-// See WaitForStatus for more details.
-func (c *Client) WaitForYellowStatus(timeout string) error {
- return c.WaitForStatus("yellow", timeout)
-}
-
-// TermVectors returns information and statistics on terms in the fields
-// of a particular document.
-func (c *Client) TermVectors(index, typ string) *TermvectorsService {
- builder := NewTermvectorsService(c)
- builder = builder.Index(index).Type(typ)
- return builder
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/client_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/client_test.go
deleted file mode 100644
index 7bdcd2287..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/client_test.go
+++ /dev/null
@@ -1,899 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
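// Editor's note: the retry tests below (TestPerformRequestRetryOnHttpError and
// friends) exercise the doubling back-off in PerformRequest above. A minimal
// standalone sketch of that schedule (the ~100ms seed with +/-10ms jitter is
// taken straight from the deleted code):
//
//	wait := int64(100 + (rand.Intn(20) - 10)) // initial wait, ~100ms with jitter
//	for retries > 0 {
//		time.Sleep(time.Duration(wait) * time.Millisecond)
//		wait += wait // double the wait after every failed attempt
//		retries -= 1
//	}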
-
-package elastic
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "log"
- "net/http"
- "regexp"
- "strings"
- "testing"
- "time"
-)
-
-func findConn(s string, slice ...*conn) (int, bool) {
- for i, t := range slice {
- if s == t.URL() {
- return i, true
- }
- }
- return -1, false
-}
-
-// -- NewClient --
-
-func TestClientDefaults(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- if client.healthcheckEnabled != true {
- t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled)
- }
- if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup {
- t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup)
- }
- if client.healthcheckTimeout != DefaultHealthcheckTimeout {
- t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout)
- }
- if client.healthcheckInterval != DefaultHealthcheckInterval {
- t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval)
- }
- if client.snifferEnabled != true {
- t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled)
- }
- if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup {
- t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup)
- }
- if client.snifferTimeout != DefaultSnifferTimeout {
- t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout)
- }
- if client.snifferInterval != DefaultSnifferInterval {
- t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval)
- }
- if client.basicAuth != false {
- t.Errorf("expected no basic auth; got: %v", client.basicAuth)
- }
- if client.basicAuthUsername != "" {
- t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
- }
- if client.basicAuthPassword != "" {
- t.Errorf("expected no basic auth password; got: %q", client.basicAuthPassword)
- }
- if client.sendGetBodyAs != "GET" {
- t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
- }
-}
-
-func TestClientWithoutURL(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- // Two things should happen here:
- // 1. The client starts sniffing the cluster on DefaultURL
- // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
- if len(client.conns) == 0 {
- t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
- }
- if !isTravis() {
- if _, found := findConn(DefaultURL, client.conns...); !found {
- t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
- }
- }
-}
-
-func TestClientWithSingleURL(t *testing.T) {
- client, err := NewClient(SetURL("http://127.0.0.1:9200"))
- if err != nil {
- t.Fatal(err)
- }
- // Two things should happen here:
- // 1. The client starts sniffing the cluster on DefaultURL
- // 2. The sniffing process should find (at least) one node in the cluster, i.e.
the DefaultURL - if len(client.conns) == 0 { - t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns) - } - if !isTravis() { - if _, found := findConn(DefaultURL, client.conns...); !found { - t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) - } - } -} - -func TestClientWithMultipleURLs(t *testing.T) { - client, err := NewClient(SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) - if err != nil { - t.Fatal(err) - } - // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes. - if len(client.conns) != 1 { - t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns) - } - if !isTravis() { - if client.conns[0].URL() != DefaultURL { - t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns) - } - } -} - -func TestClientWithBasicAuth(t *testing.T) { - client, err := NewClient(SetBasicAuth("user", "secret")) - if err != nil { - t.Fatal(err) - } - if client.basicAuth != true { - t.Errorf("expected basic auth; got: %v", client.basicAuth) - } - if got, want := client.basicAuthUsername, "user"; got != want { - t.Errorf("expected basic auth username %q; got: %q", want, got) - } - if got, want := client.basicAuthPassword, "secret"; got != want { - t.Errorf("expected basic auth password %q; got: %q", want, got) - } -} - -func TestClientSniffSuccess(t *testing.T) { - client, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:9200")) - if err != nil { - t.Fatal(err) - } - // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes. - if len(client.conns) != 1 { - t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns) - } -} - -func TestClientSniffFailure(t *testing.T) { - _, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:19201")) - if err == nil { - t.Fatalf("expected cluster to fail with no nodes found") - } -} - -func TestClientSniffDisabled(t *testing.T) { - client, err := NewClient(SetSniff(false), SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) - if err != nil { - t.Fatal(err) - } - // The client should not sniff, so it should have two connections. - if len(client.conns) != 2 { - t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns) - } - // Make two requests, so that both connections are being used - for i := 0; i < len(client.conns); i++ { - client.Flush().Do() - } - // The first connection (127.0.0.1:9200) should now be okay. - if i, found := findConn("http://127.0.0.1:9200", client.conns...); !found { - t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9200") - } else { - if conn := client.conns[i]; conn.IsDead() { - t.Fatal("expected connection to be alive, but it is dead") - } - } - // The second connection (127.0.0.1:9201) should now be marked as dead. - if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found { - t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201") - } else { - if conn := client.conns[i]; !conn.IsDead() { - t.Fatal("expected connection to be dead, but it is alive") - } - } -} - -func TestClientWillMarkConnectionsAsAliveWhenAllAreDead(t *testing.T) { - client, err := NewClient(SetURL("http://127.0.0.1:9201"), - SetSniff(false), SetHealthcheck(false), SetMaxRetries(0)) - if err != nil { - t.Fatal(err) - } - // We should have a connection. 
- if len(client.conns) != 1 {
- t.Fatalf("expected 1 node, got: %d (%v)", len(client.conns), client.conns)
- }
-
- // Make a request, so that the connection is marked as dead.
- client.Flush().Do()
-
- // The connection should now be marked as dead.
- if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
- t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
- } else {
- if conn := client.conns[i]; !conn.IsDead() {
- t.Fatalf("expected connection to be dead, got: %v", conn)
- }
- }
-
- // Now send another request and the connection should be marked as alive again.
- client.Flush().Do()
-
- if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
- t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
- } else {
- if conn := client.conns[i]; conn.IsDead() {
- t.Fatalf("expected connection to be alive, got: %v", conn)
- }
- }
-}
-
-func TestClientWithRequiredPlugins(t *testing.T) {
- _, err := NewClient(SetRequiredPlugins("no-such-plugin"))
- if err == nil {
- t.Fatal("expected error when creating client")
- }
- if got, want := err.Error(), "elastic: plugin no-such-plugin not found"; got != want {
- t.Fatalf("expected error %q; got: %q", want, got)
- }
-}
-
-func TestClientHealthcheckStartupTimeout(t *testing.T) {
- start := time.Now()
- _, err := NewClient(SetURL("http://localhost:9299"), SetHealthcheckTimeoutStartup(5*time.Second))
- duration := time.Now().Sub(start)
- if err != ErrNoClient {
- t.Fatal(err)
- }
- if duration < 5*time.Second {
- t.Fatalf("expected a timeout in more than 5 seconds; got: %v", duration)
- }
-}
-
-// -- NewSimpleClient --
-
-func TestSimpleClientDefaults(t *testing.T) {
- client, err := NewSimpleClient()
- if err != nil {
- t.Fatal(err)
- }
- if client.healthcheckEnabled != false {
- t.Errorf("expected health checks to be disabled, got: %v", client.healthcheckEnabled)
- }
- if client.healthcheckTimeoutStartup != off {
- t.Errorf("expected health checks timeout on startup = %v, got: %v", off, client.healthcheckTimeoutStartup)
- }
- if client.healthcheckTimeout != off {
- t.Errorf("expected health checks timeout = %v, got: %v", off, client.healthcheckTimeout)
- }
- if client.healthcheckInterval != off {
- t.Errorf("expected health checks interval = %v, got: %v", off, client.healthcheckInterval)
- }
- if client.snifferEnabled != false {
- t.Errorf("expected sniffing to be disabled, got: %v", client.snifferEnabled)
- }
- if client.snifferTimeoutStartup != off {
- t.Errorf("expected sniffer timeout on startup = %v, got: %v", off, client.snifferTimeoutStartup)
- }
- if client.snifferTimeout != off {
- t.Errorf("expected sniffer timeout = %v, got: %v", off, client.snifferTimeout)
- }
- if client.snifferInterval != off {
- t.Errorf("expected sniffer interval = %v, got: %v", off, client.snifferInterval)
- }
- if client.basicAuth != false {
- t.Errorf("expected no basic auth; got: %v", client.basicAuth)
- }
- if client.basicAuthUsername != "" {
- t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
- }
- if client.basicAuthPassword != "" {
- t.Errorf("expected no basic auth password; got: %q", client.basicAuthPassword)
- }
- if client.sendGetBodyAs != "GET" {
- t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
- }
-}
-
-// -- Start and stop --
-
-func TestClientStartAndStop(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
-
- running := client.IsRunning()
- if !running {
- t.Fatalf("expected background
processes to run; got: %v", running) - } - - // Stop - client.Stop() - running = client.IsRunning() - if running { - t.Fatalf("expected background processes to be stopped; got: %v", running) - } - - // Stop again => no-op - client.Stop() - running = client.IsRunning() - if running { - t.Fatalf("expected background processes to be stopped; got: %v", running) - } - - // Start - client.Start() - running = client.IsRunning() - if !running { - t.Fatalf("expected background processes to run; got: %v", running) - } - - // Start again => no-op - client.Start() - running = client.IsRunning() - if !running { - t.Fatalf("expected background processes to run; got: %v", running) - } -} - -func TestClientStartAndStopWithSnifferAndHealthchecksDisabled(t *testing.T) { - client, err := NewClient(SetSniff(false), SetHealthcheck(false)) - if err != nil { - t.Fatal(err) - } - - running := client.IsRunning() - if !running { - t.Fatalf("expected background processes to run; got: %v", running) - } - - // Stop - client.Stop() - running = client.IsRunning() - if running { - t.Fatalf("expected background processes to be stopped; got: %v", running) - } - - // Stop again => no-op - client.Stop() - running = client.IsRunning() - if running { - t.Fatalf("expected background processes to be stopped; got: %v", running) - } - - // Start - client.Start() - running = client.IsRunning() - if !running { - t.Fatalf("expected background processes to run; got: %v", running) - } - - // Start again => no-op - client.Start() - running = client.IsRunning() - if !running { - t.Fatalf("expected background processes to run; got: %v", running) - } -} - -// -- Sniffing -- - -func TestClientSniffNode(t *testing.T) { - client, err := NewClient() - if err != nil { - t.Fatal(err) - } - - ch := make(chan []*conn) - go func() { ch <- client.sniffNode(DefaultURL) }() - - select { - case nodes := <-ch: - if len(nodes) != 1 { - t.Fatalf("expected %d nodes; got: %d", 1, len(nodes)) - } - pattern := `http:\/\/[\d\.]+:9200` - matched, err := regexp.MatchString(pattern, nodes[0].URL()) - if err != nil { - t.Fatal(err) - } - if !matched { - t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL()) - } - case <-time.After(2 * time.Second): - t.Fatal("expected no timeout in sniff node") - break - } -} - -func TestClientSniffOnDefaultURL(t *testing.T) { - client, _ := NewClient() - if client == nil { - t.Fatal("no client returned") - } - - ch := make(chan error, 1) - go func() { - ch <- client.sniff(DefaultSnifferTimeoutStartup) - }() - - select { - case err := <-ch: - if err != nil { - t.Fatalf("expected sniff to succeed; got: %v", err) - } - if len(client.conns) != 1 { - t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns)) - } - pattern := `http:\/\/[\d\.]+:9200` - matched, err := regexp.MatchString(pattern, client.conns[0].URL()) - if err != nil { - t.Fatal(err) - } - if !matched { - t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL()) - } - case <-time.After(2 * time.Second): - t.Fatal("expected no timeout in sniff") - break - } -} - -// -- Selector -- - -func TestClientSelectConnHealthy(t *testing.T) { - client, err := NewClient( - SetSniff(false), - SetHealthcheck(false), - SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) - if err != nil { - t.Fatal(err) - } - - // Both are healthy, so we should get both URLs in round-robin - client.conns[0].MarkAsHealthy() - client.conns[1].MarkAsHealthy() - - // #1: Return 1st - c, err := client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() 
!= client.conns[0].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) - } - // #2: Return 2nd - c, err = client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() != client.conns[1].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) - } - // #3: Return 1st - c, err = client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() != client.conns[0].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) - } -} - -func TestClientSelectConnHealthyAndDead(t *testing.T) { - client, err := NewClient( - SetSniff(false), - SetHealthcheck(false), - SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) - if err != nil { - t.Fatal(err) - } - - // 1st is healthy, second is dead - client.conns[0].MarkAsHealthy() - client.conns[1].MarkAsDead() - - // #1: Return 1st - c, err := client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() != client.conns[0].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) - } - // #2: Return 1st again - c, err = client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() != client.conns[0].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) - } - // #3: Return 1st again and again - c, err = client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() != client.conns[0].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL()) - } -} - -func TestClientSelectConnDeadAndHealthy(t *testing.T) { - client, err := NewClient( - SetSniff(false), - SetHealthcheck(false), - SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) - if err != nil { - t.Fatal(err) - } - - // 1st is dead, 2nd is healthy - client.conns[0].MarkAsDead() - client.conns[1].MarkAsHealthy() - - // #1: Return 2nd - c, err := client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() != client.conns[1].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) - } - // #2: Return 2nd again - c, err = client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() != client.conns[1].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) - } - // #3: Return 2nd again and again - c, err = client.next() - if err != nil { - t.Fatal(err) - } - if c.URL() != client.conns[1].URL() { - t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL()) - } -} - -func TestClientSelectConnAllDead(t *testing.T) { - client, err := NewClient( - SetSniff(false), - SetHealthcheck(false), - SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201")) - if err != nil { - t.Fatal(err) - } - - // Both are dead - client.conns[0].MarkAsDead() - client.conns[1].MarkAsDead() - - // If all connections are dead, next should make them alive again, but - // still return ErrNoClient when it first finds out. 
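// (Editor's note: given the resurrection behaviour described above, the
// expected call sequence is therefore:
//
//	next() -> nil, ErrNoClient  // all conns dead; marked alive as a side effect
//	next() -> conn, nil         // resurrected conns are selectable again
//
// which is exactly what the assertions below check.)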
- c, err := client.next() - if err != ErrNoClient { - t.Fatal(err) - } - if c != nil { - t.Fatalf("expected no connection; got: %v", c) - } - // Return a connection - c, err = client.next() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if c == nil { - t.Fatalf("expected connection; got: %v", c) - } - // Return a connection - c, err = client.next() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if c == nil { - t.Fatalf("expected connection; got: %v", c) - } -} - -// -- ElasticsearchVersion -- - -func TestElasticsearchVersion(t *testing.T) { - client, err := NewClient() - if err != nil { - t.Fatal(err) - } - version, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if version == "" { - t.Errorf("expected a version number, got: %q", version) - } -} - -// -- IndexNames -- - -func TestIndexNames(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - names, err := client.IndexNames() - if err != nil { - t.Fatal(err) - } - if len(names) == 0 { - t.Fatalf("expected some index names, got: %d", len(names)) - } - var found bool - for _, name := range names { - if name == testIndexName { - found = true - break - } - } - if !found { - t.Fatalf("expected to find index %q; got: %v", testIndexName, found) - } -} - -// -- PerformRequest -- - -func TestPerformRequest(t *testing.T) { - client, err := NewClient() - if err != nil { - t.Fatal(err) - } - res, err := client.PerformRequest("GET", "/", nil, nil) - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected response to be != nil") - } - - ret := new(PingResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - t.Fatalf("expected no error on decode; got: %v", err) - } - if ret.ClusterName == "" { - t.Errorf("expected cluster name; got: %q", ret.ClusterName) - } -} - -func TestPerformRequestWithSimpleClient(t *testing.T) { - client, err := NewSimpleClient() - if err != nil { - t.Fatal(err) - } - res, err := client.PerformRequest("GET", "/", nil, nil) - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected response to be != nil") - } - - ret := new(PingResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - t.Fatalf("expected no error on decode; got: %v", err) - } - if ret.ClusterName == "" { - t.Errorf("expected cluster name; got: %q", ret.ClusterName) - } -} - -func TestPerformRequestWithLogger(t *testing.T) { - var w bytes.Buffer - out := log.New(&w, "LOGGER ", log.LstdFlags) - - client, err := NewClient(SetInfoLog(out)) - if err != nil { - t.Fatal(err) - } - - res, err := client.PerformRequest("GET", "/", nil, nil) - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected response to be != nil") - } - - ret := new(PingResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - t.Fatalf("expected no error on decode; got: %v", err) - } - if ret.ClusterName == "" { - t.Errorf("expected cluster name; got: %q", ret.ClusterName) - } - - got := w.String() - pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n` - matched, err := regexp.MatchString(pattern, got) - if err != nil { - t.Fatalf("expected log line to match %q; got: %v", pattern, err) - } - if !matched { - t.Errorf("expected log line to match %q; got: %v", pattern, got) - } -} - -func TestPerformRequestWithLoggerAndTracer(t *testing.T) { - var lw bytes.Buffer - lout := log.New(&lw, "LOGGER ", log.LstdFlags) - - var tw bytes.Buffer - tout := log.New(&tw, "TRACER ", log.LstdFlags) 
- - client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout)) - if err != nil { - t.Fatal(err) - } - - res, err := client.PerformRequest("GET", "/", nil, nil) - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected response to be != nil") - } - - ret := new(PingResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - t.Fatalf("expected no error on decode; got: %v", err) - } - if ret.ClusterName == "" { - t.Errorf("expected cluster name; got: %q", ret.ClusterName) - } - - lgot := lw.String() - if lgot == "" { - t.Errorf("expected logger output; got: %q", lgot) - } - - tgot := tw.String() - if tgot == "" { - t.Errorf("expected tracer output; got: %q", tgot) - } -} - -type customLogger struct { - out bytes.Buffer -} - -func (l *customLogger) Printf(format string, v ...interface{}) { - l.out.WriteString(fmt.Sprintf(format, v...) + "\n") -} - -func TestPerformRequestWithCustomLogger(t *testing.T) { - logger := &customLogger{} - - client, err := NewClient(SetInfoLog(logger)) - if err != nil { - t.Fatal(err) - } - - res, err := client.PerformRequest("GET", "/", nil, nil) - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected response to be != nil") - } - - ret := new(PingResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - t.Fatalf("expected no error on decode; got: %v", err) - } - if ret.ClusterName == "" { - t.Errorf("expected cluster name; got: %q", ret.ClusterName) - } - - got := logger.out.String() - pattern := `^GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n` - matched, err := regexp.MatchString(pattern, got) - if err != nil { - t.Fatalf("expected log line to match %q; got: %v", pattern, err) - } - if !matched { - t.Errorf("expected log line to match %q; got: %v", pattern, got) - } -} - -// failingTransport will run a fail callback if it sees a given URL path prefix. -type failingTransport struct { - path string // path prefix to look for - fail func(*http.Request) (*http.Response, error) // call when path prefix is found - next http.RoundTripper // next round-tripper (use http.DefaultTransport if nil) -} - -// RoundTrip implements a failing transport. -func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) { - if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil { - return tr.fail(r) - } - if tr.next != nil { - return tr.next.RoundTrip(r) - } - return http.DefaultTransport.RoundTrip(r) -} - -func TestPerformRequestRetryOnHttpError(t *testing.T) { - var numFailedReqs int - fail := func(r *http.Request) (*http.Response, error) { - numFailedReqs += 1 - //return &http.Response{Request: r, StatusCode: 400}, nil - return nil, errors.New("request failed") - } - - // Run against a failing endpoint and see if PerformRequest - // retries correctly. 
- tr := &failingTransport{path: "/fail", fail: fail}
- httpClient := &http.Client{Transport: tr}
-
- client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest("GET", "/fail", nil, nil)
- if err == nil {
- t.Fatal("expected error")
- }
- if res != nil {
- t.Fatal("expected no response")
- }
- // All five configured retries should have hit the failing endpoint.
- if numFailedReqs != 5 {
- t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
- }
-}
-
-func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) {
- var numFailedReqs int
- fail := func(r *http.Request) (*http.Response, error) {
- numFailedReqs += 1
- return &http.Response{Request: r, StatusCode: 500}, nil
- }
-
- // Run against a failing endpoint and see if PerformRequest
- // retries correctly.
- tr := &failingTransport{path: "/fail", fail: fail}
- httpClient := &http.Client{Transport: tr}
-
- client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
- if err != nil {
- t.Fatal(err)
- }
-
- res, err := client.PerformRequest("GET", "/fail", nil, nil)
- if err == nil {
- t.Fatal("expected error")
- }
- if res != nil {
- t.Fatal("expected no response")
- }
- // Retry should not have triggered additional requests because the
- // transport returned a valid (if unsuccessful) HTTP response; only
- // transport-level errors are retried.
- if numFailedReqs != 1 {
- t.Errorf("expected %d failed requests; got: %d", 1, numFailedReqs)
- }
-}
-
-// failingBody will return an error when json.Marshal is called on it.
-type failingBody struct{}
-
-// MarshalJSON implements the json.Marshaler interface and always returns an error.
-func (fb failingBody) MarshalJSON() ([]byte, error) {
- return nil, errors.New("failing to marshal")
-}
-
-func TestPerformRequestWithSetBodyError(t *testing.T) {
- client, err := NewClient()
- if err != nil {
- t.Fatal(err)
- }
- res, err := client.PerformRequest("GET", "/", nil, failingBody{})
- if err == nil {
- t.Fatal("expected error")
- }
- if res != nil {
- t.Fatal("expected no response")
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/Makefile b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/Makefile
deleted file mode 100644
index cc6261db5..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-.PHONY: build run-omega-cluster-test
-
-default: build
-
-build:
- go build cluster-test.go
-
-run-omega-cluster-test:
- go run -race cluster-test.go \
- -nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
- -n=5 \
- -retries=5 \
- -sniff=true -sniffer=10s \
- -healthcheck=true -healthchecker=5s \
- -errorlog=errors.log
-
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/README.md b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/README.md
deleted file mode 100644
index f10748cc2..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Cluster Test
-
-This directory contains a program you can use to test a cluster.
-
-Here's how:
-
-First, install a cluster of Elasticsearch nodes. You can install them on
-different computers, or start several nodes on a single machine.
-
-Build cluster-test by `go build cluster-test.go` (or build with `make`).
- -Run `./cluster-test -h` to get a list of flags: - -```sh -$ ./cluster-test -h -Usage of ./cluster-test: - -errorlog="": error log file - -healthcheck=true: enable or disable healthchecks - -healthchecker=1m0s: healthcheck interval - -index="twitter": name of ES index to use - -infolog="": info log file - -n=5: number of goroutines that run searches - -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200') - -retries=0: number of retries - -sniff=true: enable or disable sniffer - -sniffer=15m0s: sniffer interval - -tracelog="": trace log file -``` - -Example: - -```sh -$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log -``` - -The above example will create an index and start some search jobs on the -cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201, -and http://127.0.0.1:9202. - -* It will create an index called `twitter` on the cluster (`-index=twitter`) -* It will run 5 search jobs in parallel (`-n=5`). -* It will retry failed requests 5 times (`-retries=5`). -* It will sniff the cluster periodically (`-sniff=true`). -* It will sniff the cluster every 10 seconds (`-sniffer=10s`). -* It will perform health checks periodically (`-healthcheck=true`). -* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`). -* It will write an error log file (`-errorlog=error.log`). - -If you want to test Elastic with nodes going up and down, you can use a -chaos monkey script like this and run it on the nodes of your cluster: - -```sh -#!/bin/bash -while true -do - echo "Starting ES node" - elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid - sleep `jot -r 1 10 300` # wait for 10-300s - echo "Stopping ES node" - kill -TERM `cat es.pid` - sleep `jot -r 1 10 60` # wait for 10-60s -done -``` diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/cluster-test.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/cluster-test.go deleted file mode 100644 index 8880992ef..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster-test/cluster-test.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package main - -import ( - "encoding/json" - "errors" - "flag" - "fmt" - "log" - "math/rand" - "os" - "runtime" - "strings" - "sync/atomic" - "time" - - "gopkg.in/olivere/elastic.v3" -) - -type Tweet struct { - User string `json:"user"` - Message string `json:"message"` - Retweets int `json:"retweets"` - Image string `json:"image,omitempty"` - Created time.Time `json:"created,omitempty"` - Tags []string `json:"tags,omitempty"` - Location string `json:"location,omitempty"` - Suggest *elastic.SuggestField `json:"suggest_field,omitempty"` -} - -var ( - nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 
'http://192.168.2.10:9200,http://192.168.2.11:9200')") - n = flag.Int("n", 5, "number of goroutines that run searches") - index = flag.String("index", "twitter", "name of ES index to use") - errorlogfile = flag.String("errorlog", "", "error log file") - infologfile = flag.String("infolog", "", "info log file") - tracelogfile = flag.String("tracelog", "", "trace log file") - retries = flag.Int("retries", elastic.DefaultMaxRetries, "number of retries") - sniff = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer") - sniffer = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval") - healthcheck = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks") - healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval") -) - -func main() { - flag.Parse() - - runtime.GOMAXPROCS(runtime.NumCPU()) - - if *nodes == "" { - log.Fatal("no nodes specified") - } - urls := strings.SplitN(*nodes, ",", -1) - - testcase, err := NewTestCase(*index, urls) - if err != nil { - log.Fatal(err) - } - - testcase.SetErrorLogFile(*errorlogfile) - testcase.SetInfoLogFile(*infologfile) - testcase.SetTraceLogFile(*tracelogfile) - testcase.SetMaxRetries(*retries) - testcase.SetHealthcheck(*healthcheck) - testcase.SetHealthcheckInterval(*healthchecker) - testcase.SetSniff(*sniff) - testcase.SetSnifferInterval(*sniffer) - - if err := testcase.Run(*n); err != nil { - log.Fatal(err) - } - - select {} -} - -type RunInfo struct { - Success bool -} - -type TestCase struct { - nodes []string - client *elastic.Client - runs int64 - failures int64 - runCh chan RunInfo - index string - errorlogfile string - infologfile string - tracelogfile string - maxRetries int - healthcheck bool - healthcheckInterval time.Duration - sniff bool - snifferInterval time.Duration -} - -func NewTestCase(index string, nodes []string) (*TestCase, error) { - if index == "" { - return nil, errors.New("no index name specified") - } - - return &TestCase{ - index: index, - nodes: nodes, - runCh: make(chan RunInfo), - }, nil -} - -func (t *TestCase) SetIndex(name string) { - t.index = name -} - -func (t *TestCase) SetErrorLogFile(name string) { - t.errorlogfile = name -} - -func (t *TestCase) SetInfoLogFile(name string) { - t.infologfile = name -} - -func (t *TestCase) SetTraceLogFile(name string) { - t.tracelogfile = name -} - -func (t *TestCase) SetMaxRetries(n int) { - t.maxRetries = n -} - -func (t *TestCase) SetSniff(enabled bool) { - t.sniff = enabled -} - -func (t *TestCase) SetSnifferInterval(d time.Duration) { - t.snifferInterval = d -} - -func (t *TestCase) SetHealthcheck(enabled bool) { - t.healthcheck = enabled -} - -func (t *TestCase) SetHealthcheckInterval(d time.Duration) { - t.healthcheckInterval = d -} - -func (t *TestCase) Run(n int) error { - if err := t.setup(); err != nil { - return err - } - - for i := 1; i < n; i++ { - go t.search() - } - - go t.monitor() - - return nil -} - -func (t *TestCase) monitor() { - print := func() { - fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ") - } - - for { - select { - case run := <-t.runCh: - atomic.AddInt64(&t.runs, 1) - if !run.Success { - atomic.AddInt64(&t.failures, 1) - fmt.Println() - } - print() - case <-time.After(5 * time.Second): - // Print stats after some inactivity - print() - break - } - } -} - -func (t *TestCase) setup() error { - var errorlogger *log.Logger - if t.errorlogfile != "" { - f, err := 
os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) - if err != nil { - return err - } - errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile) - } - - var infologger *log.Logger - if t.infologfile != "" { - f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) - if err != nil { - return err - } - infologger = log.New(f, "", log.LstdFlags) - } - - // Trace request and response details like this - var tracelogger *log.Logger - if t.tracelogfile != "" { - f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) - if err != nil { - return err - } - tracelogger = log.New(f, "", log.LstdFlags) - } - - client, err := elastic.NewClient( - elastic.SetURL(t.nodes...), - elastic.SetErrorLog(errorlogger), - elastic.SetInfoLog(infologger), - elastic.SetTraceLog(tracelogger), - elastic.SetMaxRetries(t.maxRetries), - elastic.SetSniff(t.sniff), - elastic.SetSnifferInterval(t.snifferInterval), - elastic.SetHealthcheck(t.healthcheck), - elastic.SetHealthcheckInterval(t.healthcheckInterval)) - if err != nil { - // Handle error - return err - } - t.client = client - - // Use the IndexExists service to check if a specified index exists. - exists, err := t.client.IndexExists(t.index).Do() - if err != nil { - return err - } - if exists { - deleteIndex, err := t.client.DeleteIndex(t.index).Do() - if err != nil { - return err - } - if !deleteIndex.Acknowledged { - return errors.New("delete index not acknowledged") - } - } - - // Create a new index. - createIndex, err := t.client.CreateIndex(t.index).Do() - if err != nil { - return err - } - if !createIndex.Acknowledged { - return errors.New("create index not acknowledged") - } - - // Index a tweet (using JSON serialization) - tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0} - _, err = t.client.Index(). - Index(t.index). - Type("tweet"). - Id("1"). - BodyJson(tweet1). - Do() - if err != nil { - return err - } - - // Index a second tweet (by string) - tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}` - _, err = t.client.Index(). - Index(t.index). - Type("tweet"). - Id("2"). - BodyString(tweet2). - Do() - if err != nil { - return err - } - - // Flush to make sure the documents got written. - _, err = t.client.Flush().Index(t.index).Do() - if err != nil { - return err - } - - return nil -} - -func (t *TestCase) search() { - // Loop forever to check for connection issues - for { - // Get tweet with specified ID - get1, err := t.client.Get(). - Index(t.index). - Type("tweet"). - Id("1"). - Do() - if err != nil { - //failf("Get failed: %v", err) - t.runCh <- RunInfo{Success: false} - continue - } - if !get1.Found { - //log.Printf("Document %s not found\n", "1") - //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type) - t.runCh <- RunInfo{Success: false} - continue - } - - // Search with a term query - searchResult, err := t.client.Search(). - Index(t.index). // search in index t.index - Query(elastic.NewTermQuery("user", "olivere")). // specify the query - Sort("user", true). // sort by "user" field, ascending - From(0).Size(10). // take documents 0-9 - Pretty(true). // pretty print request and response JSON - Do() // execute - if err != nil { - //failf("Search failed: %v\n", err) - t.runCh <- RunInfo{Success: false} - continue - } - - // searchResult is of type SearchResult and returns hits, suggestions, - // and all kinds of other information from Elasticsearch. 
- //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) - - // Number of hits - if searchResult.Hits != nil { - //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) - - // Iterate through results - for _, hit := range searchResult.Hits.Hits { - // hit.Index contains the name of the index - - // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). - var tweet Tweet - err := json.Unmarshal(*hit.Source, &tweet) - if err != nil { - // Deserialization failed - //failf("Deserialize failed: %v\n", err) - t.runCh <- RunInfo{Success: false} - continue - } - - // Work with tweet - //fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) - } - } else { - // No hits - //fmt.Print("Found no tweets\n") - } - - t.runCh <- RunInfo{Success: true} - - // Sleep some time - time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health.go deleted file mode 100644 index 0c51c6041..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// ClusterHealthService allows to get a very simple status on the health of the cluster. -// -// See http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html -// for details. -type ClusterHealthService struct { - client *Client - pretty bool - indices []string - level string - local *bool - masterTimeout string - timeout string - waitForActiveShards *int - waitForNodes string - waitForRelocatingShards *int - waitForStatus string -} - -// NewClusterHealthService creates a new ClusterHealthService. -func NewClusterHealthService(client *Client) *ClusterHealthService { - return &ClusterHealthService{ - client: client, - indices: make([]string, 0), - } -} - -// Index limits the information returned to specific indices. -func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService { - s.indices = append(s.indices, indices...) - return s -} - -// Level specifies the level of detail for returned information. -func (s *ClusterHealthService) Level(level string) *ClusterHealthService { - s.level = level - return s -} - -// Local indicates whether to return local information. If it is true, -// we do not retrieve the state from master node (default: false). -func (s *ClusterHealthService) Local(local bool) *ClusterHealthService { - s.local = &local - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout specifies an explicit operation timeout. -func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService { - s.timeout = timeout - return s -} - -// WaitForActiveShards can be used to wait until the specified number of shards are active. 
-func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
- s.waitForActiveShards = &waitForActiveShards
- return s
-}
-
-// WaitForNodes can be used to wait until the specified number of nodes are available.
-// Example: "12" to wait for exact values, ">12" and "<12" for ranges.
-func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
- s.waitForNodes = waitForNodes
- return s
-}
-
-// WaitForRelocatingShards can be used to wait until no more than the specified
-// number of shards are relocating (e.g. 0 waits for all relocations to finish).
-func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
- s.waitForRelocatingShards = &waitForRelocatingShards
- return s
-}
-
-// WaitForStatus can be used to wait until the cluster is in a specific state.
-// Valid values are: green, yellow, or red.
-func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
- s.waitForStatus = waitForStatus
- return s
-}
-
-// WaitForGreenStatus will wait for the "green" state.
-func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
- return s.WaitForStatus("green")
-}
-
-// WaitForYellowStatus will wait for the "yellow" state.
-func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
- return s.WaitForStatus("yellow")
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.indices) > 0 {
- path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
- "index": strings.Join(s.indices, ","),
- })
- } else {
- path = "/_cluster/health"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "1")
- }
- if s.level != "" {
- params.Set("level", s.level)
- }
- if s.local != nil {
- params.Set("local", fmt.Sprintf("%v", *s.local))
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if s.timeout != "" {
- params.Set("timeout", s.timeout)
- }
- if s.waitForActiveShards != nil {
- params.Set("wait_for_active_shards", fmt.Sprintf("%v", *s.waitForActiveShards))
- }
- if s.waitForNodes != "" {
- params.Set("wait_for_nodes", s.waitForNodes)
- }
- if s.waitForRelocatingShards != nil {
- params.Set("wait_for_relocating_shards", fmt.Sprintf("%v", *s.waitForRelocatingShards))
- }
- if s.waitForStatus != "" {
- params.Set("wait_for_status", s.waitForStatus)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ClusterHealthService) Validate() error {
- return nil
-}
-
-// Do executes the operation.
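// Editor's note: end-to-end, the builder is used as in the tests further down
// in this patch ("twitter" is an assumed index name for the example):
//
//	res, err := client.ClusterHealth().Index("twitter").Level("shards").Pretty(true).Do()
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(res.Status) // "green", "yellow", or "red"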
-func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - resp := new(ClusterHealthResponse) - if err := json.Unmarshal(res.Body, resp); err != nil { - return nil, err - } - return resp, nil -} - -// ClusterHealthResponse is the response of ClusterHealthService.Do. -type ClusterHealthResponse struct { - ClusterName string `json:"cluster_name"` - Status string `json:"status"` - TimedOut bool `json:"timed_out"` - NumberOfNodes int `json:"number_of_nodes"` - NumberOfDataNodes int `json:"number_of_data_nodes"` - ActivePrimaryShards int `json:"active_primary_shards"` - ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` - InitializingShards int `json:"initializing_shards"` - UnassignedShards int `json:"unassigned_shards"` - DelayedUnassignedShards int `json:"delayed_unassigned_shards"` - NumberOfPendingTasks int `json:"number_of_pending_tasks"` - NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` - TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"` - ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` - - // Validation failures -> index name -> array of validation failures - ValidationFailures []map[string][]string `json:"validation_failures"` - - // Index name -> index health - Indices map[string]*ClusterIndexHealth `json:"indices"` -} - -// ClusterIndexHealth will be returned as part of ClusterHealthResponse. -type ClusterIndexHealth struct { - Status string `json:"status"` - NumberOfShards int `json:"number_of_shards"` - NumberOfReplicas int `json:"number_of_replicas"` - ActivePrimaryShards int `json:"active_primary_shards"` - ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` - InitializingShards int `json:"initializing_shards"` - UnassignedShards int `json:"unassigned_shards"` - // Validation failures - ValidationFailures []string `json:"validation_failures"` - // Shards by id, e.g. "0" or "1" - Shards map[string]*ClusterShardHealth `json:"shards"` -} - -// ClusterShardHealth will be returned as part of ClusterHealthResponse. -type ClusterShardHealth struct { - Status string `json:"status"` - PrimaryActive bool `json:"primary_active"` - ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` - InitializingShards int `json:"initializing_shards"` - UnassignedShards int `json:"unassigned_shards"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health_test.go deleted file mode 100644 index fcb612f19..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_health_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "net/url" - "testing" -) - -func TestClusterHealth(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Get cluster health - res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatalf("expected res to be != nil; got: %v", res) - } - if res.Status != "green" && res.Status != "red" && res.Status != "yellow" { - t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status) - } -} - -func TestClusterHealthURLs(t *testing.T) { - tests := []struct { - Service *ClusterHealthService - ExpectedPath string - ExpectedParams url.Values - }{ - { - Service: &ClusterHealthService{ - indices: []string{}, - }, - ExpectedPath: "/_cluster/health", - }, - { - Service: &ClusterHealthService{ - indices: []string{"twitter"}, - }, - ExpectedPath: "/_cluster/health/twitter", - }, - { - Service: &ClusterHealthService{ - indices: []string{"twitter", "gplus"}, - }, - ExpectedPath: "/_cluster/health/twitter%2Cgplus", - }, - { - Service: &ClusterHealthService{ - indices: []string{"twitter"}, - waitForStatus: "yellow", - }, - ExpectedPath: "/_cluster/health/twitter", - ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}}, - }, - } - - for _, test := range tests { - gotPath, gotParams, err := test.Service.buildURL() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if gotPath != test.ExpectedPath { - t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) - } - if gotParams.Encode() != test.ExpectedParams.Encode() { - t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) - } - } -} - -func TestClusterHealthWaitForStatus(t *testing.T) { - client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) - - // Cluster health on an index that does not exist should never get to yellow - health, err := client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do() - if err == nil { - t.Fatalf("expected timeout error; got: %v", err) - } - if !IsTimeout(err) { - t.Fatalf("expected timeout error; got: %v", err) - } - if health != nil { - t.Fatalf("expected no response; got: %v", health) - } - - // Cluster wide health - health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if health.TimedOut != false { - t.Fatalf("expected no timeout; got: %v "+ - "(does your local cluster contain unassigned shards?)", health.TimedOut) - } - if health.Status != "green" { - t.Fatalf("expected health = %q; got: %q", "green", health.Status) - } - - // Cluster wide health via shortcut on client - err = client.WaitForGreenStatus("10s") - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state.go deleted file mode 100644 index 9c3678c75..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
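One behavior the deleted TestClusterHealthWaitForStatus pins down is worth restating: WaitForStatus blocks on the server until the cluster reaches the requested state or the Timeout elapses, and an elapsed wait surfaces as an error the exported IsTimeout helper recognizes. A hedged sketch of gating startup on cluster readiness, with client initialized as in the first sketch above:

res, err := client.ClusterHealth().WaitForYellowStatus().Timeout("10s").Do()
if err != nil {
	if elastic.IsTimeout(err) {
		log.Fatal("cluster did not reach yellow within 10s")
	}
	log.Fatal(err)
}
fmt.Println(res.Status)
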
- -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// ClusterStateService allows to get a comprehensive state information of the whole cluster. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html -// for details. -type ClusterStateService struct { - client *Client - pretty bool - indices []string - metrics []string - allowNoIndices *bool - expandWildcards string - flatSettings *bool - ignoreUnavailable *bool - local *bool - masterTimeout string -} - -// NewClusterStateService creates a new ClusterStateService. -func NewClusterStateService(client *Client) *ClusterStateService { - return &ClusterStateService{ - client: client, - indices: make([]string, 0), - metrics: make([]string, 0), - } -} - -// Index is a list of index names. Use _all or an empty string to -// perform the operation on all indices. -func (s *ClusterStateService) Index(indices ...string) *ClusterStateService { - s.indices = append(s.indices, indices...) - return s -} - -// Metric limits the information returned to the specified metric. -// It can be one of: version, master_node, nodes, routing_table, metadata, -// blocks, or customs. -func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService { - s.metrics = append(s.metrics, metrics...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// (This includes `_all` string or when no indices have been specified). -func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService { - s.expandWildcards = expandWildcards - return s -} - -// FlatSettings, when set, returns settings in flat format (default: false). -func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService { - s.flatSettings = &flatSettings - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Local indicates whether to return local information. When set, it does not -// retrieve the state from master node (default: false). -func (s *ClusterStateService) Local(local bool) *ClusterStateService { - s.local = &local - return s -} - -// MasterTimeout specifies timeout for connection to master. -func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService { - s.masterTimeout = masterTimeout - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. 
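The state service follows the same fluent pattern; the deleted test further down drives it with Index("_all") and Metric("_all"). A hedged sketch of a narrower query, reusing the client from the first sketch (the metric names come from the Metric doc comment above, the index name is illustrative):

res, err := client.ClusterState().
	Metric("metadata", "routing_table").
	Index("twitter").
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.ClusterName, res.Version, res.MasterNode)
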
-func (s *ClusterStateService) buildURL() (string, url.Values, error) { - // Build URL - metrics := strings.Join(s.metrics, ",") - if metrics == "" { - metrics = "_all" - } - indices := strings.Join(s.indices, ",") - if indices == "" { - indices = "_all" - } - path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{ - "metrics": metrics, - "indices": indices, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ClusterStateService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *ClusterStateService) Do() (*ClusterStateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ClusterStateResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ClusterStateResponse is the response of ClusterStateService.Do. -type ClusterStateResponse struct { - ClusterName string `json:"cluster_name"` - Version int64 `json:"version"` - StateUUID string `json:"state_uuid"` - MasterNode string `json:"master_node"` - Blocks map[string]*clusterBlocks `json:"blocks"` - Nodes map[string]*discoveryNode `json:"nodes"` - Metadata *clusterStateMetadata `json:"metadata"` - RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"` - RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"` - Customs map[string]interface{} `json:"customs"` -} - -type clusterBlocks struct { - Global map[string]*clusterBlock `json:"global"` // id -> cluster block - Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block -} - -type clusterBlock struct { - Description string `json:"description"` - Retryable bool `json:"retryable"` - DisableStatePersistence bool `json:"disable_state_persistence"` - Levels []string `json:"levels"` -} - -type clusterStateMetadata struct { - ClusterUUID string `json:"cluster_uuid"` - Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata - Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data - RoutingTable struct { - Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table - } `json:"routing_table"` - RoutingNodes struct { - Unassigned []*shardRouting `json:"unassigned"` - Nodes []*shardRouting `json:"nodes"` - } `json:"routing_nodes"` - Customs map[string]interface{} `json:"customs"` -} - -type discoveryNode struct { - Name string `json:"name"` // server name, e.g. 
"es1" - TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300] - Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true } -} - -type clusterStateRoutingTable struct { - Indices map[string]interface{} `json:"indices"` -} - -type clusterStateRoutingNode struct { - Unassigned []*shardRouting `json:"unassigned"` - // Node Id -> shardRouting - Nodes map[string][]*shardRouting `json:"nodes"` -} - -type indexTemplateMetaData struct { - Template string `json:"template"` // e.g. "store-*" - Order int `json:"order"` - Settings map[string]interface{} `json:"settings"` // index settings - Mappings map[string]interface{} `json:"mappings"` // type name -> mapping -} - -type indexMetaData struct { - State string `json:"state"` - Settings map[string]interface{} `json:"settings"` - Mappings map[string]interface{} `json:"mappings"` - Aliases []string `json:"aliases"` // e.g. [ "alias1", "alias2" ] -} - -type indexRoutingTable struct { - Shards map[string]*shardRouting `json:"shards"` -} - -type shardRouting struct { - State string `json:"state"` - Primary bool `json:"primary"` - Node string `json:"node"` - RelocatingNode string `json:"relocating_node"` - Shard int `json:"shard"` - Index string `json:"index"` - Version int64 `json:"state"` - RestoreSource *RestoreSource `json:"restore_source"` - AllocationId *allocationId `json:"allocation_id"` - UnassignedInfo *unassignedInfo `json:"unassigned_info"` -} - -type RestoreSource struct { - Repository string `json:"repository"` - Snapshot string `json:"snapshot"` - Version string `json:"version"` - Index string `json:"index"` -} - -type allocationId struct { - Id string `json:"id"` - RelocationId string `json:"relocation_id"` -} - -type unassignedInfo struct { - Reason string `json:"reason"` - At string `json:"at"` - Details string `json:"details"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state_test.go deleted file mode 100644 index e73a8eeb7..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_state_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "net/url" - "testing" -) - -func TestClusterState(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Get cluster state - res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatalf("expected res to be != nil; got: %v", res) - } - if res.ClusterName == "" { - t.Fatalf("expected a cluster name; got: %q", res.ClusterName) - } -} - -func TestClusterStateURLs(t *testing.T) { - tests := []struct { - Service *ClusterStateService - ExpectedPath string - ExpectedParams url.Values - }{ - { - Service: &ClusterStateService{ - indices: []string{}, - metrics: []string{}, - }, - ExpectedPath: "/_cluster/state/_all/_all", - }, - { - Service: &ClusterStateService{ - indices: []string{"twitter"}, - metrics: []string{}, - }, - ExpectedPath: "/_cluster/state/_all/twitter", - }, - { - Service: &ClusterStateService{ - indices: []string{"twitter", "gplus"}, - metrics: []string{}, - }, - ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus", - }, - { - Service: &ClusterStateService{ - indices: []string{}, - metrics: []string{"nodes"}, - }, - ExpectedPath: "/_cluster/state/nodes/_all", - }, - { - Service: &ClusterStateService{ - indices: []string{"twitter"}, - metrics: []string{"nodes"}, - }, - ExpectedPath: "/_cluster/state/nodes/twitter", - }, - { - Service: &ClusterStateService{ - indices: []string{"twitter"}, - metrics: []string{"nodes"}, - masterTimeout: "1s", - }, - ExpectedPath: "/_cluster/state/nodes/twitter", - ExpectedParams: url.Values{"master_timeout": []string{"1s"}}, - }, - } - - for _, test := range tests { - gotPath, gotParams, err := test.Service.buildURL() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if gotPath != test.ExpectedPath { - t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) - } - if gotParams.Encode() != test.ExpectedParams.Encode() { - t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats.go deleted file mode 100644 index 1f0430592..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html. -type ClusterStatsService struct { - client *Client - pretty bool - nodeId []string - flatSettings *bool - human *bool -} - -// NewClusterStatsService creates a new ClusterStatsService. -func NewClusterStatsService(client *Client) *ClusterStatsService { - return &ClusterStatsService{ - client: client, - nodeId: make([]string, 0), - } -} - -// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. 
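In practice the stats call is usually cluster-wide, but NodeId (defined next, and notably taking a []string rather than a variadic list like the other services) narrows it. A hedged sketch, client as in the first example:

// "_local" restricts the stats to the node the client is connected to.
res, err := client.ClusterStats().NodeId([]string{"_local"}).Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.ClusterName, res.Status, res.Nodes.Count.Total)
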
-func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService { - s.nodeId = nodeId - return s -} - -// FlatSettings is documented as: Return settings in flat format (default: false). -func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService { - s.flatSettings = &flatSettings - return s -} - -// Human is documented as: Whether to return time and byte values in human-readable format.. -func (s *ClusterStatsService) Human(human bool) *ClusterStatsService { - s.human = &human - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *ClusterStatsService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - - if len(s.nodeId) > 0 { - path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{ - "node_id": strings.Join(s.nodeId, ","), - }) - if err != nil { - return "", url.Values{}, err - } - } else { - path, err = uritemplates.Expand("/_cluster/stats", map[string]string{}) - if err != nil { - return "", url.Values{}, err - } - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.human != nil { - params.Set("human", fmt.Sprintf("%v", *s.human)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ClusterStatsService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ClusterStatsResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ClusterStatsResponse is the response of ClusterStatsService.Do. 
-type ClusterStatsResponse struct { - Timestamp int64 `json:"timestamp"` - ClusterName string `json:"cluster_name"` - ClusterUUID string `json:"uuid"` - Status string `json:"status"` - Indices *ClusterStatsIndices `json:"indices"` - Nodes *ClusterStatsNodes `json:"nodes"` -} - -type ClusterStatsIndices struct { - Count int `json:"count"` - Shards *ClusterStatsIndicesShards `json:"shards"` - Docs *ClusterStatsIndicesDocs `json:"docs"` - Store *ClusterStatsIndicesStore `json:"store"` - FieldData *ClusterStatsIndicesFieldData `json:"fielddata"` - FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"` - IdCache *ClusterStatsIndicesIdCache `json:"id_cache"` - Completion *ClusterStatsIndicesCompletion `json:"completion"` - Segments *ClusterStatsIndicesSegments `json:"segments"` - Percolate *ClusterStatsIndicesPercolate `json:"percolate"` -} - -type ClusterStatsIndicesShards struct { - Total int `json:"total"` - Primaries int `json:"primaries"` - Replication float64 `json:"replication"` - Index *ClusterStatsIndicesShardsIndex `json:"index"` -} - -type ClusterStatsIndicesShardsIndex struct { - Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"` - Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"` - Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"` -} - -type ClusterStatsIndicesShardsIndexIntMinMax struct { - Min int `json:"min"` - Max int `json:"max"` - Avg float64 `json:"avg"` -} - -type ClusterStatsIndicesShardsIndexFloat64MinMax struct { - Min float64 `json:"min"` - Max float64 `json:"max"` - Avg float64 `json:"avg"` -} - -type ClusterStatsIndicesDocs struct { - Count int `json:"count"` - Deleted int `json:"deleted"` -} - -type ClusterStatsIndicesStore struct { - Size string `json:"size"` // e.g. "5.3gb" - SizeInBytes int64 `json:"size_in_bytes"` - ThrottleTime string `json:"throttle_time"` // e.g. "0s" - ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` -} - -type ClusterStatsIndicesFieldData struct { - MemorySize string `json:"memory_size"` // e.g. "61.3kb" - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - Evictions int64 `json:"evictions"` - Fields map[string]struct { - MemorySize string `json:"memory_size"` // e.g. "61.3kb" - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - } `json:"fields"` -} - -type ClusterStatsIndicesFilterCache struct { - MemorySize string `json:"memory_size"` // e.g. "61.3kb" - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - Evictions int64 `json:"evictions"` -} - -type ClusterStatsIndicesIdCache struct { - MemorySize string `json:"memory_size"` // e.g. "61.3kb" - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` -} - -type ClusterStatsIndicesCompletion struct { - Size string `json:"size"` // e.g. "61.3kb" - SizeInBytes int64 `json:"size_in_bytes"` - Fields map[string]struct { - Size string `json:"size"` // e.g. "61.3kb" - SizeInBytes int64 `json:"size_in_bytes"` - } `json:"fields"` -} - -type ClusterStatsIndicesSegments struct { - Count int64 `json:"count"` - Memory string `json:"memory"` // e.g. "61.3kb" - MemoryInBytes int64 `json:"memory_in_bytes"` - IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb" - IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` - IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb" - IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` - VersionMapMemory string `json:"version_map_memory"` // e.g. 
"61.3kb" - VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` - FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb" - FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` -} - -type ClusterStatsIndicesPercolate struct { - Total int64 `json:"total"` - // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems - Time string `json:"get_time"` // e.g. "1s" - TimeInBytes int64 `json:"time_in_millis"` - Current int64 `json:"current"` - MemorySize string `json:"memory_size"` // e.g. "61.3kb" - MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"` - Queries int64 `json:"queries"` -} - -// --- - -type ClusterStatsNodes struct { - Count *ClusterStatsNodesCounts `json:"counts"` - Versions []string `json:"versions"` - OS *ClusterStatsNodesOsStats `json:"os"` - Process *ClusterStatsNodesProcessStats `json:"process"` - JVM *ClusterStatsNodesJvmStats `json:"jvm"` - FS *ClusterStatsNodesFsStats `json:"fs"` - Plugins []*ClusterStatsNodesPlugin `json:"plugins"` -} - -type ClusterStatsNodesCounts struct { - Total int `json:"total"` - MasterOnly int `json:"master_only"` - DataOnly int `json:"data_only"` - MasterData int `json:"master_data"` - Client int `json:"client"` -} - -type ClusterStatsNodesOsStats struct { - AvailableProcessors int `json:"available_processors"` - Mem *ClusterStatsNodesOsStatsMem `json:"mem"` - CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"` -} - -type ClusterStatsNodesOsStatsMem struct { - Total string `json:"total"` // e.g. "16gb" - TotalInBytes int64 `json:"total_in_bytes"` -} - -type ClusterStatsNodesOsStatsCPU struct { - Vendor string `json:"vendor"` - Model string `json:"model"` - MHz int `json:"mhz"` - TotalCores int `json:"total_cores"` - TotalSockets int `json:"total_sockets"` - CoresPerSocket int `json:"cores_per_socket"` - CacheSize string `json:"cache_size"` // e.g. "256b" - CacheSizeInBytes int64 `json:"cache_size_in_bytes"` - Count int `json:"count"` -} - -type ClusterStatsNodesProcessStats struct { - CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"` - OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"` -} - -type ClusterStatsNodesProcessStatsCPU struct { - Percent float64 `json:"percent"` -} - -type ClusterStatsNodesProcessStatsOpenFileDescriptors struct { - Min int64 `json:"min"` - Max int64 `json:"max"` - Avg int64 `json:"avg"` -} - -type ClusterStatsNodesJvmStats struct { - MaxUptime string `json:"max_uptime"` // e.g. "5h" - MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` - Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"` - Mem *ClusterStatsNodesJvmStatsMem `json:"mem"` - Threads int64 `json:"threads"` -} - -type ClusterStatsNodesJvmStatsVersion struct { - Version string `json:"version"` // e.g. "1.8.0_45" - VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" - VMVersion string `json:"vm_version"` // e.g. "25.45-b02" - VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" - Count int `json:"count"` -} - -type ClusterStatsNodesJvmStatsMem struct { - HeapUsed string `json:"heap_used"` - HeapUsedInBytes int64 `json:"heap_used_in_bytes"` - HeapMax string `json:"heap_max"` - HeapMaxInBytes int64 `json:"heap_max_in_bytes"` -} - -type ClusterStatsNodesFsStats struct { - Path string `json:"path"` - Mount string `json:"mount"` - Dev string `json:"dev"` - Total string `json:"total"` // e.g. "930.7gb"` - TotalInBytes int64 `json:"total_in_bytes"` - Free string `json:"free"` // e.g. 
"930.7gb"` - FreeInBytes int64 `json:"free_in_bytes"` - Available string `json:"available"` // e.g. "930.7gb"` - AvailableInBytes int64 `json:"available_in_bytes"` - DiskReads int64 `json:"disk_reads"` - DiskWrites int64 `json:"disk_writes"` - DiskIOOp int64 `json:"disk_io_op"` - DiskReadSize string `json:"disk_read_size"` // e.g. "0b"` - DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"` - DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"` - DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"` - DiskIOSize string `json:"disk_io_size"` // e.g. "0b"` - DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"` - DiskQueue string `json:"disk_queue"` - DiskServiceTime string `json:"disk_service_time"` -} - -type ClusterStatsNodesPlugin struct { - Name string `json:"name"` - Version string `json:"version"` - Description string `json:"description"` - URL string `json:"url"` - JVM bool `json:"jvm"` - Site bool `json:"site"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats_test.go deleted file mode 100644 index 74326a6e7..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/cluster_stats_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "net/url" - "testing" -) - -func TestClusterStats(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Get cluster stats - res, err := client.ClusterStats().Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatalf("expected res to be != nil; got: %v", res) - } - if res.ClusterName == "" { - t.Fatalf("expected a cluster name; got: %q", res.ClusterName) - } -} - -func TestClusterStatsURLs(t *testing.T) { - fFlag := false - tFlag := true - - tests := []struct { - Service *ClusterStatsService - ExpectedPath string - ExpectedParams url.Values - }{ - { - Service: &ClusterStatsService{ - nodeId: []string{}, - }, - ExpectedPath: "/_cluster/stats", - }, - { - Service: &ClusterStatsService{ - nodeId: []string{"node1"}, - }, - ExpectedPath: "/_cluster/stats/nodes/node1", - }, - { - Service: &ClusterStatsService{ - nodeId: []string{"node1", "node2"}, - }, - ExpectedPath: "/_cluster/stats/nodes/node1%2Cnode2", - }, - { - Service: &ClusterStatsService{ - nodeId: []string{}, - flatSettings: &tFlag, - }, - ExpectedPath: "/_cluster/stats", - ExpectedParams: url.Values{"flat_settings": []string{"true"}}, - }, - { - Service: &ClusterStatsService{ - nodeId: []string{"node1"}, - flatSettings: &fFlag, - }, - ExpectedPath: "/_cluster/stats/nodes/node1", - ExpectedParams: url.Values{"flat_settings": []string{"false"}}, - }, - } - - for _, test := range tests { - gotPath, gotParams, err := test.Service.buildURL() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if gotPath != test.ExpectedPath { - t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath) - } - if gotParams.Encode() != test.ExpectedParams.Encode() { - t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/config/elasticsearch.yml b/services/templeton/vendor/src/github.com/olivere/elastic/config/elasticsearch.yml deleted file mode 100644 index b571a064c..000000000 --- 
a/services/templeton/vendor/src/github.com/olivere/elastic/config/elasticsearch.yml +++ /dev/null @@ -1,103 +0,0 @@ -# ======================== Elasticsearch Configuration ========================= -# -# NOTE: Elasticsearch comes with reasonable defaults for most settings. -# Before you set out to tweak and tune the configuration, make sure you -# understand what are you trying to accomplish and the consequences. -# -# The primary way of configuring a node is via this file. This template lists -# the most important settings you may want to configure for a production cluster. -# -# Please see the documentation for further information on configuration options: -# -# -# ---------------------------------- Cluster ----------------------------------- -# -# Use a descriptive name for your cluster: -# -# cluster.name: my-application -# -# ------------------------------------ Node ------------------------------------ -# -# Use a descriptive name for the node: -# -# node.name: node-1 -# -# Add custom attributes to the node: -# -# node.rack: r1 -# -# ----------------------------------- Paths ------------------------------------ -# -# Path to directory where to store the data (separate multiple locations by comma): -# -# path.data: /path/to/data -# -# Path to log files: -# -# path.logs: /path/to/logs -# -# ----------------------------------- Memory ----------------------------------- -# -# Lock the memory on startup: -# -# bootstrap.mlockall: true -# -# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory -# available on the system and that the owner of the process is allowed to use this limit. -# -# Elasticsearch performs poorly when the system is swapping the memory. -# -# ---------------------------------- Network ----------------------------------- -# -# Set the bind adress to a specific IP (IPv4 or IPv6): -# -# network.host: 192.168.0.1 -# -# Set a custom port for HTTP: -# -# http.port: 9200 -# -# For more information, see the documentation at: -# -# -# ---------------------------------- Gateway ----------------------------------- -# -# Block initial recovery after a full cluster restart until N nodes are started: -# -# gateway.recover_after_nodes: 3 -# -# For more information, see the documentation at: -# -# -# --------------------------------- Discovery ---------------------------------- -# -# Elasticsearch nodes will find each other via multicast, by default. 
-# -# To use the unicast discovery, disable the multicast discovery: -# -# discovery.zen.ping.multicast.enabled: false -# -# Pass an initial list of hosts to perform discovery when new node is started: -# -# discovery.zen.ping.unicast.hosts: ["host1", "host2"] -# -# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): -# -# discovery.zen.minimum_master_nodes: 3 -# -# For more information, see the documentation at: -# -# -# ---------------------------------- Various ----------------------------------- -# -# Disable starting multiple nodes on a single system: -# -# node.max_local_storage_nodes: 1 -# -# Require explicit names when deleting indices: -# -# action.destructive_requires_name: true - -# Enable scripting as described here: https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html -script.inline: on -script.indexed: on diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/connection.go b/services/templeton/vendor/src/github.com/olivere/elastic/connection.go deleted file mode 100644 index b8b5bf8aa..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/connection.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "sync" - "time" -) - -// conn represents a single connection to a node in a cluster. -type conn struct { - sync.RWMutex - nodeID string // node ID - url string - failures int - dead bool - deadSince *time.Time -} - -// newConn creates a new connection to the given URL. -func newConn(nodeID, url string) *conn { - c := &conn{ - nodeID: nodeID, - url: url, - } - return c -} - -// String returns a representation of the connection status. -func (c *conn) String() string { - c.RLock() - defer c.RUnlock() - return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince) -} - -// NodeID returns the ID of the node of this connection. -func (c *conn) NodeID() string { - c.RLock() - defer c.RUnlock() - return c.nodeID -} - -// URL returns the URL of this connection. -func (c *conn) URL() string { - c.RLock() - defer c.RUnlock() - return c.url -} - -// IsDead returns true if this connection is marked as dead, i.e. a previous -// request to the URL has been unsuccessful. -func (c *conn) IsDead() bool { - c.RLock() - defer c.RUnlock() - return c.dead -} - -// MarkAsDead marks this connection as dead, increments the failures -// counter and stores the current time in dead since. -func (c *conn) MarkAsDead() { - c.Lock() - c.dead = true - if c.deadSince == nil { - utcNow := time.Now().UTC() - c.deadSince = &utcNow - } - c.failures += 1 - c.Unlock() -} - -// MarkAsAlive marks this connection as eligible to be returned from the -// pool of connections by the selector. -func (c *conn) MarkAsAlive() { - c.Lock() - c.dead = false - c.Unlock() -} - -// MarkAsHealthy marks this connection as healthy, i.e. a request has been -// successfully performed with it. 
-func (c *conn) MarkAsHealthy() { - c.Lock() - c.dead = false - c.deadSince = nil - c.failures = 0 - c.Unlock() -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/count.go b/services/templeton/vendor/src/github.com/olivere/elastic/count.go deleted file mode 100644 index ebc878b2d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/count.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// CountService is a convenient service for determining the -// number of documents in an index. Use SearchService with -// a SearchType of count for counting with queries etc. -type CountService struct { - client *Client - pretty bool - index []string - typ []string - allowNoIndices *bool - analyzeWildcard *bool - analyzer string - defaultOperator string - df string - expandWildcards string - ignoreUnavailable *bool - lenient *bool - lowercaseExpandedTerms *bool - minScore interface{} - preference string - q string - query Query - routing string - bodyJson interface{} - bodyString string -} - -// NewCountService creates a new CountService. -func NewCountService(client *Client) *CountService { - return &CountService{ - client: client, - } -} - -// Index sets the names of the indices to restrict the results. -func (s *CountService) Index(index ...string) *CountService { - if s.index == nil { - s.index = make([]string, 0) - } - s.index = append(s.index, index...) - return s -} - -// Type sets the types to use to restrict the results. -func (s *CountService) Type(typ ...string) *CountService { - if s.typ == nil { - s.typ = make([]string, 0) - } - s.typ = append(s.typ, typ...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes "_all" string -// or when no indices have been specified). -func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService { - s.allowNoIndices = &allowNoIndices - return s -} - -// AnalyzeWildcard specifies whether wildcard and prefix queries should be -// analyzed (default: false). -func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService { - s.analyzeWildcard = &analyzeWildcard - return s -} - -// Analyzer specifies the analyzer to use for the query string. -func (s *CountService) Analyzer(analyzer string) *CountService { - s.analyzer = analyzer - return s -} - -// DefaultOperator specifies the default operator for query string query (AND or OR). -func (s *CountService) DefaultOperator(defaultOperator string) *CountService { - s.defaultOperator = defaultOperator - return s -} - -// Df specifies the field to use as default where no field prefix is given -// in the query string. -func (s *CountService) Df(df string) *CountService { - s.df = df - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *CountService) ExpandWildcards(expandWildcards string) *CountService { - s.expandWildcards = expandWildcards - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). 
-func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Lenient specifies whether format-based query failures (such as -// providing text to a numeric field) should be ignored. -func (s *CountService) Lenient(lenient bool) *CountService { - s.lenient = &lenient - return s -} - -// LowercaseExpandedTerms specifies whether query terms should be lowercased. -func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService { - s.lowercaseExpandedTerms = &lowercaseExpandedTerms - return s -} - -// MinScore indicates to include only documents with a specific `_score` -// value in the result. -func (s *CountService) MinScore(minScore interface{}) *CountService { - s.minScore = minScore - return s -} - -// Preference specifies the node or shard the operation should be -// performed on (default: random). -func (s *CountService) Preference(preference string) *CountService { - s.preference = preference - return s -} - -// Q in the Lucene query string syntax. You can also use Query to pass -// a Query struct. -func (s *CountService) Q(q string) *CountService { - s.q = q - return s -} - -// Query specifies the query to pass. You can also pass a query string with Q. -func (s *CountService) Query(query Query) *CountService { - s.query = query - return s -} - -// Routing specifies the routing value. -func (s *CountService) Routing(routing string) *CountService { - s.routing = routing - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *CountService) Pretty(pretty bool) *CountService { - s.pretty = pretty - return s -} - -// BodyJson specifies the query to restrict the results specified with the -// Query DSL (optional). The interface{} will be serialized to a JSON document, -// so use a map[string]interface{}. -func (s *CountService) BodyJson(body interface{}) *CountService { - s.bodyJson = body - return s -} - -// Body specifies a query to restrict the results specified with -// the Query DSL (optional). -func (s *CountService) BodyString(body string) *CountService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
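Counting composes with the query DSL exactly like searching; the deleted count_test.go further down exercises precisely this shape. A hedged sketch, client as in the first example:

// Do returns the bare count as an int64 rather than a full search response.
n, err := client.Count("twitter").
	Type("tweet").
	Query(elastic.NewTermQuery("user", "olivere")).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println(n)
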
-func (s *CountService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) > 0 && len(s.typ) > 0 { - path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - } else if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_count", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else if len(s.typ) > 0 { - path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{ - "type": strings.Join(s.typ, ","), - }) - } else { - path = "/_all/_count" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.analyzeWildcard != nil { - params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) - } - if s.analyzer != "" { - params.Set("analyzer", s.analyzer) - } - if s.defaultOperator != "" { - params.Set("default_operator", s.defaultOperator) - } - if s.df != "" { - params.Set("df", s.df) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.lenient != nil { - params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) - } - if s.lowercaseExpandedTerms != nil { - params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) - } - if s.minScore != nil { - params.Set("min_score", fmt.Sprintf("%v", s.minScore)) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.q != "" { - params.Set("q", s.q) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *CountService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *CountService) Do() (int64, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return 0, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return 0, err - } - - // Setup HTTP request body - var body interface{} - if s.query != nil { - src, err := s.query.Source() - if err != nil { - return 0, err - } - query := make(map[string]interface{}) - query["query"] = src - body = query - } else if s.bodyJson != nil { - body = s.bodyJson - } else if s.bodyString != "" { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest("POST", path, params, body) - if err != nil { - return 0, err - } - - // Return result - ret := new(CountResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return 0, err - } - if ret != nil { - return ret.Count, nil - } - - return int64(0), nil -} - -// CountResponse is the response of using the Count API. -type CountResponse struct { - Count int64 `json:"count"` - Shards shardsInfo `json:"_shards,omitempty"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/count_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/count_test.go deleted file mode 100644 index bfc2a2955..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/count_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestCountURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Types []string - Expected string - }{ - { - []string{}, - []string{}, - "/_all/_count", - }, - { - []string{}, - []string{"tweet"}, - "/_all/tweet/_count", - }, - { - []string{"twitter-*"}, - []string{"tweet", "follower"}, - "/twitter-%2A/tweet%2Cfollower/_count", - }, - { - []string{"twitter-2014", "twitter-2015"}, - []string{"tweet", "follower"}, - "/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count", - }, - } - - for _, test := range tests { - path, _, err := client.Count().Index(test.Indices...).Type(test.Types...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} - -func TestCount(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Count documents - count, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if count != 3 { - t.Errorf("expected Count = %d; got %d", 3, count) - } - - // Count documents - count, err = client.Count(testIndexName).Type("tweet").Do() - if err != nil { - t.Fatal(err) - } - if count != 3 { - t.Errorf("expected Count = %d; got %d", 3, count) - } - - // Count documents - count, err = client.Count(testIndexName).Type("gezwitscher").Do() - if err != nil { - t.Fatal(err) - } - if count != 0 { - t.Errorf("expected Count = %d; got %d", 0, count) - } - - // Count with query - query := NewTermQuery("user", "olivere") - count, err = client.Count(testIndexName).Query(query).Do() - if err != nil { - t.Fatal(err) - } - if count != 2 { - t.Errorf("expected Count = %d; got %d", 2, count) - } - - // Count with query and type - query = NewTermQuery("user", "olivere") - count, err = client.Count(testIndexName).Type("tweet").Query(query).Do() - if err != nil { - t.Fatal(err) - } - if count != 2 { - t.Errorf("expected Count = %d; got %d", 2, count) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/decoder.go b/services/templeton/vendor/src/github.com/olivere/elastic/decoder.go deleted file mode 100644 index 765a5be30..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/decoder.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" -) - -// Decoder is used to decode responses from Elasticsearch. -// Users of elastic can implement their own marshaler for advanced purposes -// and set them per Client (see SetDecoder). 
If none is specified, -// DefaultDecoder is used. -type Decoder interface { - Decode(data []byte, v interface{}) error -} - -// DefaultDecoder uses json.Unmarshal from the Go standard library -// to decode JSON data. -type DefaultDecoder struct{} - -// Decode decodes with json.Unmarshal from the Go standard library. -func (u *DefaultDecoder) Decode(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/decoder_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/decoder_test.go deleted file mode 100644 index 5cfce9f5d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/decoder_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "bytes" - "encoding/json" - "sync/atomic" - "testing" -) - -type decoder struct { - dec json.Decoder - - N int64 -} - -func (d *decoder) Decode(data []byte, v interface{}) error { - atomic.AddInt64(&d.N, 1) - dec := json.NewDecoder(bytes.NewReader(data)) - dec.UseNumber() - return dec.Decode(v) -} - -func TestDecoder(t *testing.T) { - dec := &decoder{} - client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0)) - - tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - - // Add a document - indexResult, err := client.Index(). - Index(testIndexName). - Type("tweet"). - Id("1"). - BodyJson(&tweet). - Do() - if err != nil { - t.Fatal(err) - } - if indexResult == nil { - t.Errorf("expected result to be != nil; got: %v", indexResult) - } - if dec.N <= 0 { - t.Errorf("expected at least 1 call of decoder; got: %d", dec.N) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete.go deleted file mode 100644 index dca135ee1..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/delete.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// DeleteService allows to delete a typed JSON document from a specified -// index based on its id. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html -// for details. -type DeleteService struct { - client *Client - pretty bool - id string - index string - typ string - routing string - timeout string - version interface{} - versionType string - consistency string - parent string - refresh *bool - replication string -} - -// NewDeleteService creates a new DeleteService. -func NewDeleteService(client *Client) *DeleteService { - return &DeleteService{ - client: client, - } -} - -// Type is the type of the document. -func (s *DeleteService) Type(typ string) *DeleteService { - s.typ = typ - return s -} - -// Id is the document ID. -func (s *DeleteService) Id(id string) *DeleteService { - s.id = id - return s -} - -// Index is the name of the index. -func (s *DeleteService) Index(index string) *DeleteService { - s.index = index - return s -} - -// Replication specifies a replication type. 
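Before continuing with DeleteService: the Decoder hook deleted a little further up is easy to miss, so here is a hedged sketch of the pattern the deleted decoder_test.go uses, where UseNumber keeps 64-bit integers exact instead of rounding them through float64:

package main

import (
	"bytes"
	"encoding/json"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

// numberDecoder satisfies the Decoder interface removed above.
type numberDecoder struct{}

func (numberDecoder) Decode(data []byte, v interface{}) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber() // integers decode as json.Number, not float64
	return dec.Decode(v)
}

func main() {
	// SetDecoder installs the decoder for every response on this client.
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"),
		elastic.SetDecoder(numberDecoder{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
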
-func (s *DeleteService) Replication(replication string) *DeleteService { - s.replication = replication - return s -} - -// Routing is a specific routing value. -func (s *DeleteService) Routing(routing string) *DeleteService { - s.routing = routing - return s -} - -// Timeout is an explicit operation timeout. -func (s *DeleteService) Timeout(timeout string) *DeleteService { - s.timeout = timeout - return s -} - -// Version is an explicit version number for concurrency control. -func (s *DeleteService) Version(version interface{}) *DeleteService { - s.version = version - return s -} - -// VersionType is a specific version type. -func (s *DeleteService) VersionType(versionType string) *DeleteService { - s.versionType = versionType - return s -} - -// Consistency defines a specific write consistency setting for the operation. -func (s *DeleteService) Consistency(consistency string) *DeleteService { - s.consistency = consistency - return s -} - -// Parent is the ID of parent document. -func (s *DeleteService) Parent(parent string) *DeleteService { - s.parent = parent - return s -} - -// Refresh the index after performing the operation. -func (s *DeleteService) Refresh(refresh bool) *DeleteService { - s.refresh = &refresh - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *DeleteService) Pretty(pretty bool) *DeleteService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *DeleteService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "index": s.index, - "type": s.typ, - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) - } - if s.replication != "" { - params.Set("replication", s.replication) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - if s.consistency != "" { - params.Set("consistency", s.consistency) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *DeleteService) Validate() error { - var invalid []string - if s.typ == "" { - invalid = append(invalid, "Type") - } - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.index == "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *DeleteService) Do() (*DeleteResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("DELETE", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(DeleteResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a delete request. - -// DeleteResponse is the outcome of running DeleteService.Do. 
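A hedged sketch of a typical single-document delete, client as in the first example; index, type, and id are the three fields Validate insists on, and the names used here are illustrative:

res, err := client.Delete().
	Index("twitter").
	Type("tweet").
	Id("1").
	Refresh(true). // make the deletion immediately visible to searches
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.Found) // false if the document did not exist
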
-type DeleteResponse struct { - // TODO _shards { total, failed, successful } - Found bool `json:"found"` - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id"` - Version int64 `json:"_version"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query.go deleted file mode 100644 index 3db9c0ce8..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// DeleteByQueryService deletes documents that match a query. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html. -type DeleteByQueryService struct { - client *Client - indices []string - types []string - analyzer string - consistency string - defaultOper string - df string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - replication string - routing string - timeout string - pretty bool - q string - query Query -} - -// NewDeleteByQueryService creates a new DeleteByQueryService. -// You typically use the client's DeleteByQuery to get a reference to -// the service. -func NewDeleteByQueryService(client *Client) *DeleteByQueryService { - builder := &DeleteByQueryService{ - client: client, - } - return builder -} - -// Index sets the indices on which to perform the delete operation. -func (s *DeleteByQueryService) Index(indices ...string) *DeleteByQueryService { - if s.indices == nil { - s.indices = make([]string, 0) - } - s.indices = append(s.indices, indices...) - return s -} - -// Type limits the delete operation to the given types. -func (s *DeleteByQueryService) Type(types ...string) *DeleteByQueryService { - if s.types == nil { - s.types = make([]string, 0) - } - s.types = append(s.types, types...) - return s -} - -// Analyzer to use for the query string. -func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService { - s.analyzer = analyzer - return s -} - -// Consistency represents the specific write consistency setting for the operation. -// It can be one, quorum, or all. -func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService { - s.consistency = consistency - return s -} - -// DefaultOperator for query string query (AND or OR). -func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService { - s.defaultOper = defaultOperator - return s -} - -// DF is the field to use as default where no field prefix is given in the query string. -func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService { - s.df = defaultField - return s -} - -// DefaultField is the field to use as default where no field prefix is given in the query string. -// It is an alias to the DF func. -func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService { - s.df = defaultField - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). 
-func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService { - s.ignoreUnavailable = &ignore - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices (including the _all string -// or when no indices have been specified). -func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { - s.allowNoIndices = &allow - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. It can be "open" or "closed". -func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { - s.expandWildcards = expand - return s -} - -// Replication sets a specific replication type (sync or async). -func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService { - s.replication = replication - return s -} - -// Q specifies the query in Lucene query string syntax. You can also use -// Query to programmatically specify the query. -func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService { - s.q = query - return s -} - -// QueryString is an alias to Q. Notice that you can also use Query to -// programmatically set the query. -func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService { - s.q = query - return s -} - -// Routing sets a specific routing value. -func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService { - s.routing = routing - return s -} - -// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms". -func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService { - s.timeout = timeout - return s -} - -// Pretty indents the JSON output from Elasticsearch. -func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { - s.pretty = pretty - return s -} - -// Query sets the query programmatically. -func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { - s.query = query - return s -} - -// Do executes the delete-by-query operation. 
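Pulling the options above together, a minimal sketch of a complete delete-by-query, mirroring the term-query flow in delete_by_query_test.go below; the helper name purgeUser and the literals are illustrative:

    // purgeUser is a sketch, not part of the removed file; it assumes an
    // initialized client and imports "fmt" and "gopkg.in/olivere/elastic.v3".
    func purgeUser(client *elastic.Client) error {
        q := elastic.NewTermQuery("user", "sandrae")
        res, err := client.DeleteByQuery().
            Index("twitter").
            Type("tweet").
            Query(q). // or .Q("user:sandrae") in Lucene syntax
            Do()
        if err != nil {
            return err
        }
        fmt.Printf("took %dms, timed out: %v\n", res.Took, res.TimedOut)
        return nil
    }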
-func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) { - var err error - - // Build url - path := "/" - - // Indices part - indexPart := make([]string, 0) - for _, index := range s.indices { - index, err = uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - if len(indexPart) > 0 { - path += strings.Join(indexPart, ",") - } - - // Types part - typesPart := make([]string, 0) - for _, typ := range s.types { - typ, err = uritemplates.Expand("{type}", map[string]string{ - "type": typ, - }) - if err != nil { - return nil, err - } - typesPart = append(typesPart, typ) - } - if len(typesPart) > 0 { - path += "/" + strings.Join(typesPart, ",") - } - - // Search - path += "/_query" - - // Parameters - params := make(url.Values) - if s.analyzer != "" { - params.Set("analyzer", s.analyzer) - } - if s.consistency != "" { - params.Set("consistency", s.consistency) - } - if s.defaultOper != "" { - params.Set("default_operator", s.defaultOper) - } - if s.df != "" { - params.Set("df", s.df) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.replication != "" { - params.Set("replication", s.replication) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - if s.q != "" { - params.Set("q", s.q) - } - - // Set body if there is a query set - var body interface{} - if s.query != nil { - src, err := s.query.Source() - if err != nil { - return nil, err - } - query := make(map[string]interface{}) - query["query"] = src - body = query - } - - // Get response - res, err := s.client.PerformRequest("DELETE", path, params, body) - if err != nil { - return nil, err - } - - // Return result - ret := new(DeleteByQueryResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService. -type DeleteByQueryResult struct { - Took int64 `json:"took"` - TimedOut bool `json:"timed_out"` - Indices map[string]IndexDeleteByQueryResult `json:"_indices"` - Failures []shardOperationFailure `json:"failures"` -} - -// IndexNames returns the names of the indices the DeleteByQuery touched. -func (res DeleteByQueryResult) IndexNames() []string { - var indices []string - for index, _ := range res.Indices { - indices = append(indices, index) - } - return indices -} - -// All returns the index delete-by-query result of all indices. -func (res DeleteByQueryResult) All() IndexDeleteByQueryResult { - all, _ := res.Indices["_all"] - return all -} - -// IndexDeleteByQueryResult is the result of a delete-by-query for a specific -// index. -type IndexDeleteByQueryResult struct { - // Found documents, matching the query. - Found int `json:"found"` - // Deleted documents, successfully, from the given index. - Deleted int `json:"deleted"` - // Missing documents when trying to delete them. - Missing int `json:"missing"` - // Failed documents to be deleted for the given index. 
- Failed int `json:"failed"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query_test.go deleted file mode 100644 index 71b786f6e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/delete_by_query_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestDeleteByQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) - - found, err := client.HasPlugin("delete-by-query") - if err != nil { - t.Fatal(err) - } - if !found { - t.Skip("DeleteByQuery in 2.0 is now a plugin (delete-by-query) and must be " + - "loaded in the configuration") - } - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Count documents - count, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if count != 3 { - t.Fatalf("expected count = %d; got: %d", 3, count) - } - - // Delete all documents by sandrae - q := NewTermQuery("user", "sandrae") - res, err := client.DeleteByQuery().Index(testIndexName).Type("tweet").Query(q).Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatalf("expected response != nil; got: %v", res) - } - - // Check response - if got, want := len(res.IndexNames()), 2; got != want { - t.Fatalf("expected %d indices; got: %d", want, got) - } - idx, found := res.Indices["_all"] - if !found { - t.Fatalf("expected to find index %q", "_all") - } - if got, want := idx.Found, 1; got != want { - t.Fatalf("expected Found = %v; got: %v", want, got) - } - if got, want := idx.Deleted, 1; got != want { - t.Fatalf("expected Deleted = %v; got: %v", want, got) - } - if got, want := idx.Missing, 0; got != want { - t.Fatalf("expected Missing = %v; got: %v", want, got) - } - if got, want := idx.Failed, 0; got != want { - t.Fatalf("expected Failed = %v; got: %v", want, got) - } - idx, found = res.Indices[testIndexName] - if !found { - t.Errorf("expected Found = true; got: %v", found) - } - if got, want := idx.Found, 1; got != want { - t.Fatalf("expected Found = %v; got: %v", want, got) - } - if got, want := idx.Deleted, 1; got != want { - t.Fatalf("expected Deleted = %v; got: %v", want, got) - } - if got, want := idx.Missing, 0; got != want { - t.Fatalf("expected Missing = %v; got: %v", want, got) - } - if got, want := idx.Failed, 0; got != want { - t.Fatalf("expected Failed = %v; got: %v", want, got) - } - - // Flush and check count - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - count, err = client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - 
if count != 2 { - t.Fatalf("expected Count = %d; got: %d", 2, count) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_template.go deleted file mode 100644 index b8d0223f6..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/delete_template.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// DeleteTemplateService deletes a search template. More information can -// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. -type DeleteTemplateService struct { - client *Client - pretty bool - id string - version *int - versionType string -} - -// NewDeleteTemplateService creates a new DeleteTemplateService. -func NewDeleteTemplateService(client *Client) *DeleteTemplateService { - return &DeleteTemplateService{ - client: client, - } -} - -// Id is the template ID. -func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService { - s.id = id - return s -} - -// Version an explicit version number for concurrency control. -func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService { - s.version = &version - return s -} - -// VersionType specifies a version type. -func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService { - s.versionType = versionType - return s -} - -// buildURL builds the URL for the operation. -func (s *DeleteTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.version != nil { - params.Set("version", fmt.Sprintf("%d", *s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *DeleteTemplateService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("DELETE", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(DeleteTemplateResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// DeleteTemplateResponse is the response of DeleteTemplateService.Do. 
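A minimal sketch of the delete-template round trip, matching ExampleDeleteTemplateService shown later in example_test.go; the helper name dropTemplate and the template id are illustrative:

    // dropTemplate is a sketch, not part of the removed file; it assumes an
    // initialized client and imports "fmt" and "gopkg.in/olivere/elastic.v3".
    func dropTemplate(client *elastic.Client) error {
        resp, err := client.DeleteTemplate().Id("my-search-template").Do()
        if err != nil {
            return err
        }
        if resp != nil && resp.Found {
            fmt.Println("template deleted")
        }
        return nil
    }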
-type DeleteTemplateResponse struct { - Found bool `json:"found"` - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id"` - Version int `json:"_version"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_template_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_template_test.go deleted file mode 100644 index 85bb7ad55..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/delete_template_test.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestDeleteTemplateValidate(t *testing.T) { - client := setupTestClient(t) - - // No template id -> fail with error - res, err := NewDeleteTemplateService(client).Do() - if err == nil { - t.Fatalf("expected Delete to fail without index name") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/delete_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/delete_test.go deleted file mode 100644 index 418fdec7d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/delete_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestDelete(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Count documents - count, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if count != 3 { - t.Errorf("expected Count = %d; got %d", 3, count) - } - - // Delete document 1 - res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do() - if err != nil { - t.Fatal(err) - } - if res.Found != true { - t.Errorf("expected Found = true; got %v", res.Found) - } - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - count, err = client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if count != 2 { - t.Errorf("expected Count = %d; got %d", 2, count) - } - - // Delete non existent document 99 - res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh(true).Do() - if err == nil { - t.Fatalf("expected error; got: %v", err) - } - if !IsNotFound(err) { - t.Errorf("expected NotFound error; got %v", err) - } - if res != nil { - t.Fatalf("expected no response; got: %v", res) - } - - count, err = client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if 
count != 2 { - t.Errorf("expected Count = %d; got %d", 2, count) - } -} - -func TestDeleteValidate(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - // No index name -> fail with error - res, err := NewDeleteService(client).Type("tweet").Id("1").Do() - if err == nil { - t.Fatalf("expected Delete to fail without index name") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } - - // No type -> fail with error - res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do() - if err == nil { - t.Fatalf("expected Delete to fail without type") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } - - // No id -> fail with error - res, err = NewDeleteService(client).Index(testIndexName).Type("tweet").Do() - if err == nil { - t.Fatalf("expected Delete to fail without id") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/doc.go b/services/templeton/vendor/src/github.com/olivere/elastic/doc.go deleted file mode 100644 index 336a734de..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/doc.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -/* -Package elastic provides an interface to the Elasticsearch server -(http://www.elasticsearch.org/). - -The first thing you do is to create a Client. If you have Elasticsearch -installed and running with its default settings -(i.e. available at http://127.0.0.1:9200), all you need to do is: - - client, err := elastic.NewClient() - if err != nil { - // Handle error - } - -If your Elasticsearch server is running on a different IP and/or port, -just provide a URL to NewClient: - - // Create a client and connect to http://192.168.2.10:9201 - client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201")) - if err != nil { - // Handle error - } - -You can pass many more configuration parameters to NewClient. Review the -documentation of NewClient for more information. - -If no Elasticsearch server is available, services will fail when creating -a new request and will return ErrNoClient. - -A Client provides services. The services usually come with a variety of -methods to prepare the query and a Do function to execute it against the -Elasticsearch REST interface and return a response. Here is an example -of the IndexExists service that checks if a given index already exists. - - exists, err := client.IndexExists("twitter").Do() - if err != nil { - // Handle error - } - if !exists { - // Index does not exist yet. - } - -Look up the documentation for Client to get an idea of the services provided -and what kinds of responses you get when executing the Do function of a service. -Also see the wiki on Github for more details. - -*/ -package elastic diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/errors.go b/services/templeton/vendor/src/github.com/olivere/elastic/errors.go deleted file mode 100644 index 93c2c6de5..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/errors.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" -) - -// checkResponse will return an error if the request/response indicates -// an error returned from Elasticsearch. -// -// HTTP status codes between in the range [200..299] are considered successful. -// All other errors are considered errors except they are specified in -// ignoreErrors. This is necessary because for some services, HTTP status 404 -// is a valid response from Elasticsearch (e.g. the Exists service). -// -// The func tries to parse error details as returned from Elasticsearch -// and encapsulates them in type elastic.Error. -func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error { - // 200-299 are valid status codes - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - // Ignore certain errors? - for _, code := range ignoreErrors { - if code == res.StatusCode { - return nil - } - } - return createResponseError(res) -} - -// createResponseError creates an Error structure from the HTTP response, -// its status code and the error information sent by Elasticsearch. -func createResponseError(res *http.Response) error { - if res.Body == nil { - return &Error{Status: res.StatusCode} - } - data, err := ioutil.ReadAll(res.Body) - if err != nil { - return &Error{Status: res.StatusCode} - } - errReply := new(Error) - err = json.Unmarshal(data, errReply) - if err != nil { - return &Error{Status: res.StatusCode} - } - if errReply != nil { - if errReply.Status == 0 { - errReply.Status = res.StatusCode - } - return errReply - } - return &Error{Status: res.StatusCode} -} - -// Error encapsulates error details as returned from Elasticsearch. -type Error struct { - Status int `json:"status"` - Details *ErrorDetails `json:"error,omitempty"` -} - -// ErrorDetails encapsulate error details from Elasticsearch. -// It is used in e.g. elastic.Error and elastic.BulkResponseItem. -type ErrorDetails struct { - Type string `json:"type"` - Reason string `json:"reason"` - ResourceType string `json:"resource.type,omitempty"` - ResourceId string `json:"resource.id,omitempty"` - Index string `json:"index,omitempty"` - Phase string `json:"phase,omitempty"` - Grouped bool `json:"grouped,omitempty"` - CausedBy map[string]interface{} `json:"caused_by,omitempty"` - RootCause []*ErrorDetails `json:"root_cause,omitempty"` - FailedShards []map[string]interface{} `json:"failed_shards,omitempty"` -} - -// Error returns a string representation of the error. -func (e *Error) Error() string { - if e.Details != nil && e.Details.Reason != "" { - return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type) - } else { - return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status)) - } -} - -// IsNotFound returns true if the given error indicates that Elasticsearch -// returned HTTP status 404. The err parameter can be of type *elastic.Error, -// elastic.Error, *http.Response or int (indicating the HTTP status code). -func IsNotFound(err interface{}) bool { - switch e := err.(type) { - case *http.Response: - return e.StatusCode == http.StatusNotFound - case *Error: - return e.Status == http.StatusNotFound - case Error: - return e.Status == http.StatusNotFound - case int: - return e == http.StatusNotFound - } - return false -} - -// IsTimeout returns true if the given error indicates that Elasticsearch -// returned HTTP status 408. 
The err parameter can be of type *elastic.Error, -// elastic.Error, *http.Response or int (indicating the HTTP status code). -func IsTimeout(err interface{}) bool { - switch e := err.(type) { - case *http.Response: - return e.StatusCode == http.StatusRequestTimeout - case *Error: - return e.Status == http.StatusRequestTimeout - case Error: - return e.Status == http.StatusRequestTimeout - case int: - return e == http.StatusRequestTimeout - } - return false -} - -// -- General errors -- - -// shardsInfo represents information from a shard. -type shardsInfo struct { - Total int `json:"total"` - Successful int `json:"successful"` - Failed int `json:"failed"` -} - -// shardOperationFailure represents a shard failure. -type shardOperationFailure struct { - Shard int `json:"shard"` - Index string `json:"index"` - Status string `json:"status"` - // "reason" -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/errors_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/errors_test.go deleted file mode 100644 index c33dc2d6d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/errors_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package elastic - -import ( - "bufio" - "fmt" - "net/http" - "strings" - "testing" -) - -func TestResponseError(t *testing.T) { - raw := "HTTP/1.1 404 Not Found\r\n" + - "\r\n" + - `{"error":{"root_cause":[{"type":"index_missing_exception","reason":"no such index","index":"elastic-test"}],"type":"index_missing_exception","reason":"no such index","index":"elastic-test"},"status":404}` + "\r\n" - r := bufio.NewReader(strings.NewReader(raw)) - - req, err := http.NewRequest("GET", "/", nil) - if err != nil { - t.Fatal(err) - } - - resp, err := http.ReadResponse(r, nil) - if err != nil { - t.Fatal(err) - } - err = checkResponse(req, resp) - if err == nil { - t.Fatalf("expected error; got: %v", err) - } - - // Check for correct error message - expected := fmt.Sprintf("elastic: Error %d (%s): no such index [type=index_missing_exception]", resp.StatusCode, http.StatusText(resp.StatusCode)) - got := err.Error() - if got != expected { - t.Fatalf("expected %q; got: %q", expected, got) - } - - // Check that error is of type *elastic.Error, which contains additional information - e, ok := err.(*Error) - if !ok { - t.Fatal("expected error to be of type *elastic.Error") - } - if e.Status != resp.StatusCode { - t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status) - } - if e.Details == nil { - t.Fatalf("expected error details; got: %v", e.Details) - } - if got, want := e.Details.Index, "elastic-test"; got != want { - t.Fatalf("expected error details index %q; got: %q", want, got) - } - if got, want := e.Details.Type, "index_missing_exception"; got != want { - t.Fatalf("expected error details type %q; got: %q", want, got) - } - if got, want := e.Details.Reason, "no such index"; got != want { - t.Fatalf("expected error details reason %q; got: %q", want, got) - } - if got, want := len(e.Details.RootCause), 1; got != want { - t.Fatalf("expected %d error details root causes; got: %d", want, got) - } - - if got, want := e.Details.RootCause[0].Index, "elastic-test"; got != want { - t.Fatalf("expected root cause index %q; got: %q", want, got) - } - if got, want := e.Details.RootCause[0].Type, "index_missing_exception"; got != want { - t.Fatalf("expected root cause type %q; got: %q", want, got) - } - if got, want := e.Details.RootCause[0].Reason, "no such index"; got != want { - t.Fatalf("expected root cause reason %q; got: %q", 
want, got)
-	}
-}
-
-func TestResponseErrorHTML(t *testing.T) {
-	raw := "HTTP/1.1 413 Request Entity Too Large\r\n" +
-		"\r\n" +
-		`<html>
-<head><title>413 Request Entity Too Large</title></head>
-<body bgcolor="white">
-<center><h1>413 Request Entity Too Large</h1></center>
-<hr><center>nginx/1.6.2</center>
-</body>
-</html>` + "\r\n"
-	r := bufio.NewReader(strings.NewReader(raw))
-
-	req, err := http.NewRequest("GET", "/", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := http.ReadResponse(r, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = checkResponse(req, resp)
-	if err == nil {
-		t.Fatalf("expected error; got: %v", err)
-	}
-
-	// Check for correct error message
-	expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge))
-	got := err.Error()
-	if got != expected {
-		t.Fatalf("expected %q; got: %q", expected, got)
-	}
-}
-
-func TestResponseErrorWithIgnore(t *testing.T) {
-	raw := "HTTP/1.1 404 Not Found\r\n" +
-		"\r\n" +
-		`{"some":"response"}` + "\r\n"
-	r := bufio.NewReader(strings.NewReader(raw))
-
-	req, err := http.NewRequest("HEAD", "/", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resp, err := http.ReadResponse(r, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = checkResponse(req, resp)
-	if err == nil {
-		t.Fatalf("expected error; got: %v", err)
-	}
-	err = checkResponse(req, resp, 404) // ignore 404 errors
-	if err != nil {
-		t.Fatalf("expected no error; got: %v", err)
-	}
-}
-
-func TestIsNotFound(t *testing.T) {
-	if got, want := IsNotFound(nil), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsNotFound(""), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsNotFound(200), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsNotFound(404), true; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-
-	if got, want := IsNotFound(&Error{Status: 404}), true; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsNotFound(&Error{Status: 200}), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-
-	if got, want := IsNotFound(Error{Status: 404}), true; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsNotFound(Error{Status: 200}), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-
-	if got, want := IsNotFound(&http.Response{StatusCode: 404}), true; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsNotFound(&http.Response{StatusCode: 200}), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-}
-
-func TestIsTimeout(t *testing.T) {
-	if got, want := IsTimeout(nil), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsTimeout(""), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsTimeout(200), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsTimeout(408), true; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-
-	if got, want := IsTimeout(&Error{Status: 408}), true; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsTimeout(&Error{Status: 200}), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-
-	if got, want := IsTimeout(Error{Status: 408}), true; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsTimeout(Error{Status: 200}), false; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-
-	if got, want := IsTimeout(&http.Response{StatusCode: 408}), true; got != want {
-		t.Errorf("expected %v; got: %v", want, got)
-	}
-	if got, want := IsTimeout(&http.Response{StatusCode: 200}), false; got !=
want { - t.Errorf("expected %v; got: %v", want, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/example_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/example_test.go deleted file mode 100644 index 8fc03ec1a..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/example_test.go +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic_test - -import ( - "encoding/json" - "fmt" - "log" - "os" - "reflect" - "time" - - "gopkg.in/olivere/elastic.v3" -) - -type Tweet struct { - User string `json:"user"` - Message string `json:"message"` - Retweets int `json:"retweets"` - Image string `json:"image,omitempty"` - Created time.Time `json:"created,omitempty"` - Tags []string `json:"tags,omitempty"` - Location string `json:"location,omitempty"` - Suggest *elastic.SuggestField `json:"suggest_field,omitempty"` -} - -func Example() { - errorlog := log.New(os.Stdout, "APP ", log.LstdFlags) - - // Obtain a client. You can provide your own HTTP client here. - client, err := elastic.NewClient(elastic.SetErrorLog(errorlog)) - if err != nil { - // Handle error - panic(err) - } - - // Trace request and response details like this - //client.SetTracer(log.New(os.Stdout, "", 0)) - - // Ping the Elasticsearch server to get e.g. the version number - info, code, err := client.Ping("http://127.0.0.1:9200").Do() - if err != nil { - // Handle error - panic(err) - } - fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number) - - // Getting the ES version number is quite common, so there's a shortcut - esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200") - if err != nil { - // Handle error - panic(err) - } - fmt.Printf("Elasticsearch version %s", esversion) - - // Use the IndexExists service to check if a specified index exists. - exists, err := client.IndexExists("twitter").Do() - if err != nil { - // Handle error - panic(err) - } - if !exists { - // Create a new index. - createIndex, err := client.CreateIndex("twitter").Do() - if err != nil { - // Handle error - panic(err) - } - if !createIndex.Acknowledged { - // Not acknowledged - } - } - - // Index a tweet (using JSON serialization) - tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0} - put1, err := client.Index(). - Index("twitter"). - Type("tweet"). - Id("1"). - BodyJson(tweet1). - Do() - if err != nil { - // Handle error - panic(err) - } - fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type) - - // Index a second tweet (by string) - tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}` - put2, err := client.Index(). - Index("twitter"). - Type("tweet"). - Id("2"). - BodyString(tweet2). - Do() - if err != nil { - // Handle error - panic(err) - } - fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type) - - // Get tweet with specified ID - get1, err := client.Get(). - Index("twitter"). - Type("tweet"). - Id("1"). - Do() - if err != nil { - // Handle error - panic(err) - } - if get1.Found { - fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type) - } - - // Flush to make sure the documents got written. 
- _, err = client.Flush().Index("twitter").Do() - if err != nil { - panic(err) - } - - // Search with a term query - termQuery := elastic.NewTermQuery("user", "olivere") - searchResult, err := client.Search(). - Index("twitter"). // search in index "twitter" - Query(termQuery). // specify the query - Sort("user", true). // sort by "user" field, ascending - From(0).Size(10). // take documents 0-9 - Pretty(true). // pretty print request and response JSON - Do() // execute - if err != nil { - // Handle error - panic(err) - } - - // searchResult is of type SearchResult and returns hits, suggestions, - // and all kinds of other information from Elasticsearch. - fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) - - // Each is a convenience function that iterates over hits in a search result. - // It makes sure you don't need to check for nil values in the response. - // However, it ignores errors in serialization. If you want full control - // over iterating the hits, see below. - var ttyp Tweet - for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { - t := item.(Tweet) - fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) - } - // TotalHits is another convenience function that works even when something goes wrong. - fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) - - // Here's how you iterate through results with full control over each step. - if searchResult.Hits != nil { - fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) - - // Iterate through results - for _, hit := range searchResult.Hits.Hits { - // hit.Index contains the name of the index - - // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). - var t Tweet - err := json.Unmarshal(*hit.Source, &t) - if err != nil { - // Deserialization failed - } - - // Work with tweet - fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) - } - } else { - // No hits - fmt.Print("Found no tweets\n") - } - - // Update a tweet by the update API of Elasticsearch. - // We just increment the number of retweets. - script := elastic.NewScript("ctx._source.retweets += num").Param("num", 1) - update, err := client.Update().Index("twitter").Type("tweet").Id("1"). - Script(script). - Upsert(map[string]interface{}{"retweets": 0}). - Do() - if err != nil { - // Handle error - panic(err) - } - fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version) - - // ... - - // Delete an index. - deleteIndex, err := client.DeleteIndex("twitter").Do() - if err != nil { - // Handle error - panic(err) - } - if !deleteIndex.Acknowledged { - // Not acknowledged - } -} - -func ExampleClient_NewClient_default() { - // Obtain a client to the Elasticsearch instance on http://127.0.0.1:9200. - client, err := elastic.NewClient() - if err != nil { - // Handle error - fmt.Printf("connection failed: %v\n", err) - } else { - fmt.Println("connected") - } - _ = client - // Output: - // connected -} - -func ExampleClient_NewClient_cluster() { - // Obtain a client for an Elasticsearch cluster of two nodes, - // running on 10.0.1.1 and 10.0.1.2. - client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200")) - if err != nil { - // Handle error - panic(err) - } - _ = client -} - -func ExampleClient_NewClient_manyOptions() { - // Obtain a client for an Elasticsearch cluster of two nodes, - // running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer. - // Set the healthcheck interval to 10s. When requests fail, - // retry 5 times. 
Print error messages to os.Stderr and informational - // messages to os.Stdout. - client, err := elastic.NewClient( - elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"), - elastic.SetSniff(false), - elastic.SetHealthcheckInterval(10*time.Second), - elastic.SetMaxRetries(5), - elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)), - elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags))) - if err != nil { - // Handle error - panic(err) - } - _ = client -} - -func ExampleIndexExistsService() { - // Get a client to the local Elasticsearch instance. - client, err := elastic.NewClient() - if err != nil { - // Handle error - panic(err) - } - // Use the IndexExists service to check if the index "twitter" exists. - exists, err := client.IndexExists("twitter").Do() - if err != nil { - // Handle error - panic(err) - } - if exists { - // ... - } -} - -func ExampleCreateIndexService() { - // Get a client to the local Elasticsearch instance. - client, err := elastic.NewClient() - if err != nil { - // Handle error - panic(err) - } - // Create a new index. - createIndex, err := client.CreateIndex("twitter").Do() - if err != nil { - // Handle error - panic(err) - } - if !createIndex.Acknowledged { - // Not acknowledged - } -} - -func ExampleDeleteIndexService() { - // Get a client to the local Elasticsearch instance. - client, err := elastic.NewClient() - if err != nil { - // Handle error - panic(err) - } - // Delete an index. - deleteIndex, err := client.DeleteIndex("twitter").Do() - if err != nil { - // Handle error - panic(err) - } - if !deleteIndex.Acknowledged { - // Not acknowledged - } -} - -func ExampleSearchService() { - // Get a client to the local Elasticsearch instance. - client, err := elastic.NewClient() - if err != nil { - // Handle error - panic(err) - } - - // Search with a term query - termQuery := elastic.NewTermQuery("user", "olivere") - searchResult, err := client.Search(). - Index("twitter"). // search in index "twitter" - Query(termQuery). // specify the query - Sort("user", true). // sort by "user" field, ascending - From(0).Size(10). // take documents 0-9 - Pretty(true). // pretty print request and response JSON - Do() // execute - if err != nil { - // Handle error - panic(err) - } - - // searchResult is of type SearchResult and returns hits, suggestions, - // and all kinds of other information from Elasticsearch. - fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) - - // Number of hits - if searchResult.Hits != nil { - fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) - - // Iterate through results - for _, hit := range searchResult.Hits.Hits { - // hit.Index contains the name of the index - - // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). - var t Tweet - err := json.Unmarshal(*hit.Source, &t) - if err != nil { - // Deserialization failed - } - - // Work with tweet - fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) - } - } else { - // No hits - fmt.Print("Found no tweets\n") - } -} - -func ExampleAggregations() { - // Get a client to the local Elasticsearch instance. - client, err := elastic.NewClient() - if err != nil { - // Handle error - panic(err) - } - - // Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year). 
-	timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
-	histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
-	timeline = timeline.SubAggregation("history", histogram)
-
-	// Search with a term query
-	searchResult, err := client.Search().
-		Index("twitter").                  // search in index "twitter"
-		Query(elastic.NewMatchAllQuery()). // return all results, but ...
-		SearchType("count").               // ... do not return hits, just the count
-		Aggregation("timeline", timeline). // add our aggregation to the query
-		Pretty(true).                      // pretty print request and response JSON
-		Do()                               // execute
-	if err != nil {
-		// Handle error
-		panic(err)
-	}
-
-	// Access "timeline" aggregate in search result.
-	agg, found := searchResult.Aggregations.Terms("timeline")
-	if !found {
-		log.Fatalf("we should have a terms aggregation called %q", "timeline")
-	}
-	for _, userBucket := range agg.Buckets {
-		// Every bucket should have the user field as key.
-		user := userBucket.Key
-
-		// The sub-aggregation history should have the number of tweets per year.
-		histogram, found := userBucket.DateHistogram("history")
-		if found {
-			for _, year := range histogram.Buckets {
-				fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
-			}
-		}
-	}
-}
-
-func ExampleSearchResult() {
-	client, err := elastic.NewClient()
-	if err != nil {
-		panic(err)
-	}
-
-	// Do a search
-	searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do()
-	if err != nil {
-		panic(err)
-	}
-
-	// searchResult is of type SearchResult and returns hits, suggestions,
-	// and all kinds of other information from Elasticsearch.
-	fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
-
-	// Each is a utility function that iterates over hits in a search result.
-	// It makes sure you don't need to check for nil values in the response.
-	// However, it ignores errors in serialization. If you want full control
-	// over iterating the hits, see below.
-	var ttyp Tweet
-	for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
-		t := item.(Tweet)
-		fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
-	}
-	fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
-
-	// Here's how you iterate hits with full control.
-	if searchResult.Hits != nil {
-		fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
-
-		// Iterate through results
-		for _, hit := range searchResult.Hits.Hits {
-			// hit.Index contains the name of the index
-
-			// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
-			var t Tweet
-			err := json.Unmarshal(*hit.Source, &t)
-			if err != nil {
-				// Deserialization failed
-			}
-
-			// Work with tweet
-			fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
-		}
-	} else {
-		// No hits
-		fmt.Print("Found no tweets\n")
-	}
-}
-
-func ExamplePutTemplateService() {
-	client, err := elastic.NewClient()
-	if err != nil {
-		panic(err)
-	}
-
-	// Create search template
-	tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
-
-	// Create template
-	resp, err := client.PutTemplate().
-		Id("my-search-template"). // Name of the template
-		BodyString(tmpl).
// Search template itself - Do() // Execute - if err != nil { - panic(err) - } - if resp.Created { - fmt.Println("search template created") - } -} - -func ExampleGetTemplateService() { - client, err := elastic.NewClient() - if err != nil { - panic(err) - } - - // Get template stored under "my-search-template" - resp, err := client.GetTemplate().Id("my-search-template").Do() - if err != nil { - panic(err) - } - fmt.Printf("search template is: %q\n", resp.Template) -} - -func ExampleDeleteTemplateService() { - client, err := elastic.NewClient() - if err != nil { - panic(err) - } - - // Delete template - resp, err := client.DeleteTemplate().Id("my-search-template").Do() - if err != nil { - panic(err) - } - if resp != nil && resp.Found { - fmt.Println("template deleted") - } -} - -func ExampleClusterHealthService() { - client, err := elastic.NewClient() - if err != nil { - panic(err) - } - - // Get cluster health - res, err := client.ClusterHealth().Index("twitter").Do() - if err != nil { - panic(err) - } - if res == nil { - panic(err) - } - fmt.Printf("Cluster status is %q\n", res.Status) -} - -func ExampleClusterHealthService_WaitForGreen() { - client, err := elastic.NewClient() - if err != nil { - panic(err) - } - - // Wait for status green - res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do() - if err != nil { - panic(err) - } - if res.TimedOut { - fmt.Printf("time out waiting for cluster status %q\n", "green") - } else { - fmt.Printf("cluster status is %q\n", res.Status) - } -} - -func ExampleClusterStateService() { - client, err := elastic.NewClient() - if err != nil { - panic(err) - } - - // Get cluster state - res, err := client.ClusterState().Metric("version").Do() - if err != nil { - panic(err) - } - fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/exists.go b/services/templeton/vendor/src/github.com/olivere/elastic/exists.go deleted file mode 100644 index 7a42d53c9..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/exists.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "net/http" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// ExistsService checks for the existence of a document using HEAD. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html -// for details. -type ExistsService struct { - client *Client - pretty bool - id string - index string - typ string - preference string - realtime *bool - refresh *bool - routing string - parent string -} - -// NewExistsService creates a new ExistsService. -func NewExistsService(client *Client) *ExistsService { - return &ExistsService{ - client: client, - } -} - -// Id is the document ID. -func (s *ExistsService) Id(id string) *ExistsService { - s.id = id - return s -} - -// Index is the name of the index. -func (s *ExistsService) Index(index string) *ExistsService { - s.index = index - return s -} - -// Type is the type of the document (use `_all` to fetch the first document -// matching the ID across all types). -func (s *ExistsService) Type(typ string) *ExistsService { - s.typ = typ - return s -} - -// Preference specifies the node or shard the operation should be performed on (default: random). 
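A minimal sketch of an existence probe using the options above; the helper name tweetExists and the literals are illustrative, and "_local" is a standard Elasticsearch preference value rather than something defined in this file:

    // tweetExists is a sketch, not part of the removed file; it assumes an
    // initialized client from "gopkg.in/olivere/elastic.v3".
    func tweetExists(client *elastic.Client) (bool, error) {
        return client.Exists().
            Index("twitter").
            Type("tweet").
            Id("1").
            Preference("_local"). // favor a local shard copy if present
            Do()
    }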
-func (s *ExistsService) Preference(preference string) *ExistsService { - s.preference = preference - return s -} - -// Realtime specifies whether to perform the operation in realtime or search mode. -func (s *ExistsService) Realtime(realtime bool) *ExistsService { - s.realtime = &realtime - return s -} - -// Refresh the shard containing the document before performing the operation. -func (s *ExistsService) Refresh(refresh bool) *ExistsService { - s.refresh = &refresh - return s -} - -// Routing is a specific routing value. -func (s *ExistsService) Routing(routing string) *ExistsService { - s.routing = routing - return s -} - -// Parent is the ID of the parent document. -func (s *ExistsService) Parent(parent string) *ExistsService { - s.parent = parent - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *ExistsService) Pretty(pretty bool) *ExistsService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *ExistsService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "id": s.id, - "index": s.index, - "type": s.typ, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.realtime != nil { - params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) - } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ExistsService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *ExistsService) Do() (bool, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return false, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return false, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) - if err != nil { - return false, err - } - - // Return operation response - switch res.StatusCode { - case http.StatusOK: - return true, nil - case http.StatusNotFound: - return false, nil - default: - return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/exists_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/exists_test.go deleted file mode 100644 index 58a4fe707..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/exists_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "testing" -) - -func TestExists(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0))) - - exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do() - if err != nil { - t.Fatal(err) - } - if !exists { - t.Fatal("expected document to exist") - } -} - -func TestExistsValidate(t *testing.T) { - client := setupTestClient(t) - - // No index -> fail with error - res, err := NewExistsService(client).Type("tweet").Id("1").Do() - if err == nil { - t.Fatalf("expected Delete to fail without index name") - } - if res != false { - t.Fatalf("expected result to be false; got: %v", res) - } - - // No type -> fail with error - res, err = NewExistsService(client).Index(testIndexName).Id("1").Do() - if err == nil { - t.Fatalf("expected Delete to fail without index name") - } - if res != false { - t.Fatalf("expected result to be false; got: %v", res) - } - - // No id -> fail with error - res, err = NewExistsService(client).Index(testIndexName).Type("tweet").Do() - if err == nil { - t.Fatalf("expected Delete to fail without index name") - } - if res != false { - t.Fatalf("expected result to be false; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/explain.go b/services/templeton/vendor/src/github.com/olivere/elastic/explain.go deleted file mode 100644 index e922bc9b5..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/explain.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -var ( - _ = fmt.Print - _ = log.Print - _ = strings.Index - _ = uritemplates.Expand - _ = url.Parse -) - -// ExplainService computes a score explanation for a query and -// a specific document. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html. -type ExplainService struct { - client *Client - pretty bool - id string - index string - typ string - q string - routing string - lenient *bool - analyzer string - df string - fields []string - lowercaseExpandedTerms *bool - xSourceInclude []string - analyzeWildcard *bool - parent string - preference string - xSource []string - defaultOperator string - xSourceExclude []string - source string - bodyJson interface{} - bodyString string -} - -// NewExplainService creates a new ExplainService. -func NewExplainService(client *Client) *ExplainService { - return &ExplainService{ - client: client, - xSource: make([]string, 0), - xSourceExclude: make([]string, 0), - fields: make([]string, 0), - xSourceInclude: make([]string, 0), - } -} - -// Id is the document ID. -func (s *ExplainService) Id(id string) *ExplainService { - s.id = id - return s -} - -// Index is the name of the index. -func (s *ExplainService) Index(index string) *ExplainService { - s.index = index - return s -} - -// Type is the type of the document. -func (s *ExplainService) Type(typ string) *ExplainService { - s.typ = typ - return s -} - -// Source is the URL-encoded query definition (instead of using the request body). 
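A minimal sketch of an explain request built from the exported constructor above and the Query and Do methods defined below; the helper name explainMatch, the client value, and the literals are illustrative:

    // explainMatch is a sketch, not part of the removed file; it assumes an
    // initialized client from "gopkg.in/olivere/elastic.v3".
    func explainMatch(client *elastic.Client) (*elastic.ExplainResponse, error) {
        return elastic.NewExplainService(client).
            Index("twitter").
            Type("tweet").
            Id("1").
            Query(elastic.NewTermQuery("user", "olivere")).
            Do()
    }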
-func (s *ExplainService) Source(source string) *ExplainService { - s.source = source - return s -} - -// XSourceExclude is a list of fields to exclude from the returned _source field. -func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService { - s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) - return s -} - -// Lenient specifies whether format-based query failures -// (such as providing text to a numeric field) should be ignored. -func (s *ExplainService) Lenient(lenient bool) *ExplainService { - s.lenient = &lenient - return s -} - -// Query in the Lucene query string syntax. -func (s *ExplainService) Q(q string) *ExplainService { - s.q = q - return s -} - -// Routing sets a specific routing value. -func (s *ExplainService) Routing(routing string) *ExplainService { - s.routing = routing - return s -} - -// AnalyzeWildcard specifies whether wildcards and prefix queries -// in the query string query should be analyzed (default: false). -func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService { - s.analyzeWildcard = &analyzeWildcard - return s -} - -// Analyzer is the analyzer for the query string query. -func (s *ExplainService) Analyzer(analyzer string) *ExplainService { - s.analyzer = analyzer - return s -} - -// Df is the default field for query string query (default: _all). -func (s *ExplainService) Df(df string) *ExplainService { - s.df = df - return s -} - -// Fields is a list of fields to return in the response. -func (s *ExplainService) Fields(fields ...string) *ExplainService { - s.fields = append(s.fields, fields...) - return s -} - -// LowercaseExpandedTerms specifies whether query terms should be lowercased. -func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService { - s.lowercaseExpandedTerms = &lowercaseExpandedTerms - return s -} - -// XSourceInclude is a list of fields to extract and return from the _source field. -func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService { - s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) - return s -} - -// DefaultOperator is the default operator for query string query (AND or OR). -func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService { - s.defaultOperator = defaultOperator - return s -} - -// Parent is the ID of the parent document. -func (s *ExplainService) Parent(parent string) *ExplainService { - s.parent = parent - return s -} - -// Preference specifies the node or shard the operation should be performed on (default: random). -func (s *ExplainService) Preference(preference string) *ExplainService { - s.preference = preference - return s -} - -// XSource is true or false to return the _source field or not, or a list of fields to return. -func (s *ExplainService) XSource(xSource ...string) *ExplainService { - s.xSource = append(s.xSource, xSource...) - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *ExplainService) Pretty(pretty bool) *ExplainService { - s.pretty = pretty - return s -} - -// Query sets a query definition using the Query DSL. -func (s *ExplainService) Query(query Query) *ExplainService { - src, err := query.Source() - if err != nil { - // Do nothing in case of an error - return s - } - body := make(map[string]interface{}) - body["query"] = src - s.bodyJson = body - return s -} - -// BodyJson sets the query definition using the Query DSL. 
-func (s *ExplainService) BodyJson(body interface{}) *ExplainService { - s.bodyJson = body - return s -} - -// BodyString sets the query definition using the Query DSL as a string. -func (s *ExplainService) BodyString(body string) *ExplainService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *ExplainService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{ - "id": s.id, - "index": s.index, - "type": s.typ, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if len(s.xSource) > 0 { - params.Set("_source", strings.Join(s.xSource, ",")) - } - if s.defaultOperator != "" { - params.Set("default_operator", s.defaultOperator) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.source != "" { - params.Set("source", s.source) - } - if len(s.xSourceExclude) > 0 { - params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) - } - if s.lenient != nil { - params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) - } - if s.q != "" { - params.Set("q", s.q) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if s.lowercaseExpandedTerms != nil { - params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) - } - if len(s.xSourceInclude) > 0 { - params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) - } - if s.analyzeWildcard != nil { - params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) - } - if s.analyzer != "" { - params.Set("analyzer", s.analyzer) - } - if s.df != "" { - params.Set("df", s.df) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ExplainService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *ExplainService) Do() (*ExplainResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ExplainResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ExplainResponse is the response of ExplainService.Do. 
-type ExplainResponse struct { - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id"` - Matched bool `json:"matched"` - Explanation map[string]interface{} `json:"explanation"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/explain_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/explain_test.go deleted file mode 100644 index e799d6c52..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/explain_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestExplain(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - - // Add a document - indexResult, err := client.Index(). - Index(testIndexName). - Type("tweet"). - Id("1"). - BodyJson(&tweet1). - Refresh(true). - Do() - if err != nil { - t.Fatal(err) - } - if indexResult == nil { - t.Errorf("expected result to be != nil; got: %v", indexResult) - } - - // Explain - query := NewTermQuery("user", "olivere") - expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do() - if err != nil { - t.Fatal(err) - } - if expl == nil { - t.Fatal("expected to return an explanation") - } - if !expl.Matched { - t.Errorf("expected matched to be %v; got: %v", true, expl.Matched) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context.go b/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context.go deleted file mode 100644 index e13c9eb47..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "net/url" - "strings" -) - -type FetchSourceContext struct { - fetchSource bool - transformSource bool - includes []string - excludes []string -} - -func NewFetchSourceContext(fetchSource bool) *FetchSourceContext { - return &FetchSourceContext{ - fetchSource: fetchSource, - includes: make([]string, 0), - excludes: make([]string, 0), - } -} - -func (fsc *FetchSourceContext) FetchSource() bool { - return fsc.fetchSource -} - -func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) { - fsc.fetchSource = fetchSource -} - -func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext { - fsc.includes = append(fsc.includes, includes...) - return fsc -} - -func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext { - fsc.excludes = append(fsc.excludes, excludes...) - return fsc -} - -func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext { - fsc.transformSource = transformSource - return fsc -} - -func (fsc *FetchSourceContext) Source() (interface{}, error) { - if !fsc.fetchSource { - return false, nil - } - return map[string]interface{}{ - "includes": fsc.includes, - "excludes": fsc.excludes, - }, nil -} - -// Query returns the parameters in a form suitable for a URL query string. 
-func (fsc *FetchSourceContext) Query() url.Values { - params := url.Values{} - if !fsc.fetchSource { - params.Add("_source", "false") - return params - } - if len(fsc.includes) > 0 { - params.Add("_source_include", strings.Join(fsc.includes, ",")) - } - if len(fsc.excludes) > 0 { - params.Add("_source_exclude", strings.Join(fsc.excludes, ",")) - } - return params -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context_test.go deleted file mode 100644 index 2bb683d69..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/fetch_source_context_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestFetchSourceContextNoFetchSource(t *testing.T) { - builder := NewFetchSourceContext(false) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `false` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) { - builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c") - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `false` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFetchSourceContextFetchSource(t *testing.T) { - builder := NewFetchSourceContext(true) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"excludes":[],"includes":[]}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFetchSourceContextFetchSourceWithIncludesOnly(t *testing.T) { - builder := NewFetchSourceContext(true).Include("a", "b") - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"excludes":[],"includes":["a","b"]}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) { - builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c") - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"excludes":["c"],"includes":["a","b"]}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFetchSourceContextQueryDefaults(t *testing.T) { - builder := NewFetchSourceContext(true) - values := builder.Query() - got := values.Encode() - expected := "" - if got != expected { - t.Errorf("expected %q; got: %q", expected, got) - } -} - -func TestFetchSourceContextQueryNoFetchSource(t *testing.T) { - builder := 
NewFetchSourceContext(false) - values := builder.Query() - got := values.Encode() - expected := "_source=false" - if got != expected { - t.Errorf("expected %q; got: %q", expected, got) - } -} - -func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) { - builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c") - values := builder.Query() - got := values.Encode() - expected := "_source_exclude=c&_source_include=a%2Cb" - if got != expected { - t.Errorf("expected %q; got: %q", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/geo_point.go b/services/templeton/vendor/src/github.com/olivere/elastic/geo_point.go deleted file mode 100644 index a09351ca2..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/geo_point.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "strconv" - "strings" -) - -// GeoPoint is a geographic position described via latitude and longitude. -type GeoPoint struct { - Lat float64 `json:"lat"` - Lon float64 `json:"lon"` -} - -// Source returns the object to be serialized in Elasticsearch DSL. -func (pt *GeoPoint) Source() map[string]float64 { - return map[string]float64{ - "lat": pt.Lat, - "lon": pt.Lon, - } -} - -// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude. -func GeoPointFromLatLon(lat, lon float64) *GeoPoint { - return &GeoPoint{Lat: lat, Lon: lon} -} - -// GeoPointFromString initializes a new GeoPoint by a string that is -// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091". -func GeoPointFromString(latLon string) (*GeoPoint, error) { - latlon := strings.SplitN(latLon, ",", 2) - if len(latlon) != 2 { - return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon) - } - lat, err := strconv.ParseFloat(latlon[0], 64) - if err != nil { - return nil, err - } - lon, err := strconv.ParseFloat(latlon[1], 64) - if err != nil { - return nil, err - } - return &GeoPoint{Lat: lat, Lon: lon}, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/geo_point_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/geo_point_test.go deleted file mode 100644 index ebc28c2ec..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/geo_point_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestGeoPointSource(t *testing.T) { - pt := GeoPoint{Lat: 40, Lon: -70} - - data, err := json.Marshal(pt.Source()) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"lat":40,"lon":-70}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/get.go b/services/templeton/vendor/src/github.com/olivere/elastic/get.go deleted file mode 100644 index eb2221755..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/get.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// GetService allows to get a typed JSON document from the index based -// on its id. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html -// for details. -type GetService struct { - client *Client - pretty bool - index string - typ string - id string - routing string - preference string - fields []string - refresh *bool - realtime *bool - fsc *FetchSourceContext - version interface{} - versionType string - parent string - ignoreErrorsOnGeneratedFields *bool -} - -// NewGetService creates a new GetService. -func NewGetService(client *Client) *GetService { - return &GetService{ - client: client, - typ: "_all", - } -} - -/* -// String returns a string representation of the GetService request. -func (s *GetService) String() string { - return fmt.Sprintf("[%v][%v][%v]: routing [%v]", - s.index, - s.typ, - s.id, - s.routing) -} -*/ - -// Index is the name of the index. -func (s *GetService) Index(index string) *GetService { - s.index = index - return s -} - -// Type is the type of the document (use `_all` to fetch the first document -// matching the ID across all types). -func (s *GetService) Type(typ string) *GetService { - s.typ = typ - return s -} - -// Id is the document ID. -func (s *GetService) Id(id string) *GetService { - s.id = id - return s -} - -// Parent is the ID of the parent document. -func (s *GetService) Parent(parent string) *GetService { - s.parent = parent - return s -} - -// Routing is the specific routing value. -func (s *GetService) Routing(routing string) *GetService { - s.routing = routing - return s -} - -// Preference specifies the node or shard the operation should be performed on (default: random). -func (s *GetService) Preference(preference string) *GetService { - s.preference = preference - return s -} - -// Fields is a list of fields to return in the response. -func (s *GetService) Fields(fields ...string) *GetService { - if s.fields == nil { - s.fields = make([]string, 0) - } - s.fields = append(s.fields, fields...) - return s -} - -func (s *GetService) FetchSource(fetchSource bool) *GetService { - if s.fsc == nil { - s.fsc = NewFetchSourceContext(fetchSource) - } else { - s.fsc.SetFetchSource(fetchSource) - } - return s -} - -func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService { - s.fsc = fetchSourceContext - return s -} - -// Refresh the shard containing the document before performing the operation. -func (s *GetService) Refresh(refresh bool) *GetService { - s.refresh = &refresh - return s -} - -// Realtime specifies whether to perform the operation in realtime or search mode. -func (s *GetService) Realtime(realtime bool) *GetService { - s.realtime = &realtime - return s -} - -// VersionType is the specific version type. -func (s *GetService) VersionType(versionType string) *GetService { - s.versionType = versionType - return s -} - -// Version is an explicit version number for concurrency control. -func (s *GetService) Version(version interface{}) *GetService { - s.version = version - return s -} - -// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that -// are generated if the transaction log is accessed. 
-func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService { - s.ignoreErrorsOnGeneratedFields = &ignore - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *GetService) Pretty(pretty bool) *GetService { - s.pretty = pretty - return s -} - -// Validate checks if the operation is valid. -func (s *GetService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// buildURL builds the URL for the operation. -func (s *GetService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "id": s.id, - "index": s.index, - "type": s.typ, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - if s.realtime != nil { - params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) - } - if s.ignoreErrorsOnGeneratedFields != nil { - params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields)) - } - if s.fsc != nil { - for k, values := range s.fsc.Query() { - params.Add(k, strings.Join(values, ",")) - } - } - return path, params, nil -} - -// Do executes the operation. -func (s *GetService) Do() (*GetResult, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(GetResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a get request. - -// GetResult is the outcome of GetService.Do. 
-type GetResult struct { - Index string `json:"_index"` // index meta field - Type string `json:"_type"` // type meta field - Id string `json:"_id"` // id meta field - Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) - Timestamp int64 `json:"_timestamp"` // timestamp meta field - TTL int64 `json:"_ttl"` // ttl meta field - Routing string `json:"_routing"` // routing meta field - Parent string `json:"_parent"` // parent meta field - Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService - Source *json.RawMessage `json:"_source,omitempty"` - Found bool `json:"found,omitempty"` - Fields map[string]interface{} `json:"fields,omitempty"` - //Error string `json:"error,omitempty"` // used only in MultiGet - // TODO double-check that MultiGet now returns details error information - Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/get_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/get_template.go deleted file mode 100644 index 328d6e516..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/get_template.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// GetTemplateService reads a search template. -// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. -type GetTemplateService struct { - client *Client - pretty bool - id string - version interface{} - versionType string -} - -// NewGetTemplateService creates a new GetTemplateService. -func NewGetTemplateService(client *Client) *GetTemplateService { - return &GetTemplateService{ - client: client, - } -} - -// Id is the template ID. -func (s *GetTemplateService) Id(id string) *GetTemplateService { - s.id = id - return s -} - -// Version is an explicit version number for concurrency control. -func (s *GetTemplateService) Version(version interface{}) *GetTemplateService { - s.version = version - return s -} - -// VersionType is a specific version type. -func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService { - s.versionType = versionType - return s -} - -// buildURL builds the URL for the operation. -func (s *GetTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *GetTemplateService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation and returns the template. 
-func (s *GetTemplateService) Do() (*GetTemplateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return result - ret := new(GetTemplateResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -type GetTemplateResponse struct { - Template string `json:"template"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/get_template_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/get_template_test.go deleted file mode 100644 index 00aea6899..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/get_template_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestGetPutDeleteTemplate(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // This is a search template, not an index template! - tmpl := `{ - "template": { - "query" : { "term" : { "{{my_field}}" : "{{my_value}}" } }, - "size" : "{{my_size}}" - }, - "params":{ - "my_field" : "user", - "my_value" : "olivere", - "my_size" : 5 - } -}` - putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if putres == nil { - t.Fatalf("expected response; got: %v", putres) - } - if !putres.Created { - t.Fatalf("expected template to be created; got: %v", putres.Created) - } - - // Always delete template - defer client.DeleteTemplate().Id("elastic-template").Do() - - // Get template - getres, err := client.GetTemplate().Id("elastic-template").Do() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if getres == nil { - t.Fatalf("expected response; got: %v", getres) - } - if getres.Template == "" { - t.Errorf("expected template %q; got: %q", tmpl, getres.Template) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/get_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/get_test.go deleted file mode 100644 index 25dbe7391..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/get_test.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
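Aside: a hedged sketch of the search-template round trip that the test above exercises, using only calls visible in this vendored code (PutTemplate, GetTemplate, and DeleteTemplate with Id, BodyString, and Do). The client setup and the template id are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient() // assumes Elasticsearch on localhost:9200
	if err != nil {
		log.Fatal(err)
	}

	// A search template with Mustache-style parameters, as in the test above.
	tmpl := `{"template":{"query":{"term":{"{{my_field}}":"{{my_value}}"}}}}`

	// Store the search template under an id of our choosing.
	if _, err := client.PutTemplate().Id("example-template").BodyString(tmpl).Do(); err != nil {
		log.Fatal(err)
	}
	// Always delete the template when done, as the test does.
	defer client.DeleteTemplate().Id("example-template").Do()

	// Read it back; the response carries the raw template source.
	res, err := client.GetTemplate().Id("example-template").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("stored template:", res.Template)
}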
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestGet(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - // Get document 1 - res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do() - if err != nil { - t.Fatal(err) - } - if res.Found != true { - t.Errorf("expected Found = true; got %v", res.Found) - } - if res.Source == nil { - t.Errorf("expected Source != nil; got %v", res.Source) - } - - // Get non existent document 99 - res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do() - if err == nil { - t.Fatalf("expected error; got: %v", err) - } - if !IsNotFound(err) { - t.Errorf("expected NotFound error; got: %v", err) - } - if res != nil { - t.Errorf("expected no response; got: %v", res) - } -} - -func TestGetWithSourceFiltering(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - // Get document 1, without source - res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do() - if err != nil { - t.Fatal(err) - } - if res.Found != true { - t.Errorf("expected Found = true; got %v", res.Found) - } - if res.Source != nil { - t.Errorf("expected Source == nil; got %v", res.Source) - } - - // Get document 1, exclude Message field - fsc := NewFetchSourceContext(true).Exclude("message") - res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do() - if err != nil { - t.Fatal(err) - } - if res.Found != true { - t.Errorf("expected Found = true; got %v", res.Found) - } - if res.Source == nil { - t.Errorf("expected Source != nil; got %v", res.Source) - } - var tw tweet - err = json.Unmarshal(*res.Source, &tw) - if err != nil { - t.Fatal(err) - } - if tw.User != "olivere" { - t.Errorf("expected user %q; got: %q", "olivere", tw.User) - } - if tw.Message != "" { - t.Errorf("expected message %q; got: %q", "", tw.Message) - } -} - -func TestGetWithFields(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - // Get document 1, specifying fields - res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Fields("message").Do() - if err != nil { - t.Fatal(err) - } - if res.Found != true { - t.Errorf("expected Found = true; got: %v", res.Found) - } - - // We must NOT have the "user" field - _, ok := res.Fields["user"] - if ok { - t.Fatalf("expected no field %q in document", "user") - } - - // We must have the "message" field - messageField, ok := res.Fields["message"] - if !ok { - t.Fatalf("expected field %q in document", "message") - } - - // Depending on the version of elasticsearch the message field will be returned - // as a string or a slice of strings. This test works in both cases. 
- - messageString, ok := messageField.(string) - if !ok { - messageArray, ok := messageField.([]interface{}) - if !ok { - t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField) - } else { - messageString, ok = messageArray[0].(string) - if !ok { - t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField) - } - } - } - - if messageString != tweet1.Message { - t.Errorf("expected message %q; got: %q", tweet1.Message, messageString) - } -} - -func TestGetValidate(t *testing.T) { - // Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name - client := setupTestClientAndCreateIndex(t) - - if _, err := client.Get().Do(); err == nil { - t.Fatal("expected Get to fail") - } - if _, err := client.Get().Index(testIndexName).Do(); err == nil { - t.Fatal("expected Get to fail") - } - if _, err := client.Get().Type("tweet").Do(); err == nil { - t.Fatal("expected Get to fail") - } - if _, err := client.Get().Id("1").Do(); err == nil { - t.Fatal("expected Get to fail") - } - if _, err := client.Get().Index(testIndexName).Type("tweet").Do(); err == nil { - t.Fatal("expected Get to fail") - } - if _, err := client.Get().Type("tweet").Id("1").Do(); err == nil { - t.Fatal("expected Get to fail") - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/highlight.go b/services/templeton/vendor/src/github.com/olivere/elastic/highlight.go deleted file mode 100644 index 44501a731..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/highlight.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// Highlight allows highlighting search results on one or more fields. -// For details, see: -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html -type Highlight struct { - fields []*HighlighterField - tagsSchema *string - highlightFilter *bool - fragmentSize *int - numOfFragments *int - preTags []string - postTags []string - order *string - encoder *string - requireFieldMatch *bool - boundaryMaxScan *int - boundaryChars []rune - highlighterType *string - fragmenter *string - highlightQuery Query - noMatchSize *int - phraseLimit *int - options map[string]interface{} - forceSource *bool - useExplicitFieldOrder bool -} - -func NewHighlight() *Highlight { - hl := &Highlight{ - fields: make([]*HighlighterField, 0), - preTags: make([]string, 0), - postTags: make([]string, 0), - boundaryChars: make([]rune, 0), - options: make(map[string]interface{}), - } - return hl -} - -func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight { - hl.fields = append(hl.fields, fields...) 
- return hl -} - -func (hl *Highlight) Field(name string) *Highlight { - field := NewHighlighterField(name) - hl.fields = append(hl.fields, field) - return hl -} - -func (hl *Highlight) TagsSchema(schemaName string) *Highlight { - hl.tagsSchema = &schemaName - return hl -} - -func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight { - hl.highlightFilter = &highlightFilter - return hl -} - -func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight { - hl.fragmentSize = &fragmentSize - return hl -} - -func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight { - hl.numOfFragments = &numOfFragments - return hl -} - -func (hl *Highlight) Encoder(encoder string) *Highlight { - hl.encoder = &encoder - return hl -} - -func (hl *Highlight) PreTags(preTags ...string) *Highlight { - hl.preTags = append(hl.preTags, preTags...) - return hl -} - -func (hl *Highlight) PostTags(postTags ...string) *Highlight { - hl.postTags = append(hl.postTags, postTags...) - return hl -} - -func (hl *Highlight) Order(order string) *Highlight { - hl.order = &order - return hl -} - -func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight { - hl.requireFieldMatch = &requireFieldMatch - return hl -} - -func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight { - hl.boundaryMaxScan = &boundaryMaxScan - return hl -} - -func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight { - hl.boundaryChars = append(hl.boundaryChars, boundaryChars...) - return hl -} - -func (hl *Highlight) HighlighterType(highlighterType string) *Highlight { - hl.highlighterType = &highlighterType - return hl -} - -func (hl *Highlight) Fragmenter(fragmenter string) *Highlight { - hl.fragmenter = &fragmenter - return hl -} - -func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight { - hl.highlightQuery = highlightQuery - return hl -} - -func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight { - hl.noMatchSize = &noMatchSize - return hl -} - -func (hl *Highlight) Options(options map[string]interface{}) *Highlight { - hl.options = options - return hl -} - -func (hl *Highlight) ForceSource(forceSource bool) *Highlight { - hl.forceSource = &forceSource - return hl -} - -func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight { - hl.useExplicitFieldOrder = useExplicitFieldOrder - return hl -} - -// Creates the query source for the bool query. -func (hl *Highlight) Source() (interface{}, error) { - // Returns the map inside of "highlight": - // "highlight":{ - // ... this ... 
- // } - source := make(map[string]interface{}) - if hl.tagsSchema != nil { - source["tags_schema"] = *hl.tagsSchema - } - if hl.preTags != nil && len(hl.preTags) > 0 { - source["pre_tags"] = hl.preTags - } - if hl.postTags != nil && len(hl.postTags) > 0 { - source["post_tags"] = hl.postTags - } - if hl.order != nil { - source["order"] = *hl.order - } - if hl.highlightFilter != nil { - source["highlight_filter"] = *hl.highlightFilter - } - if hl.fragmentSize != nil { - source["fragment_size"] = *hl.fragmentSize - } - if hl.numOfFragments != nil { - source["number_of_fragments"] = *hl.numOfFragments - } - if hl.encoder != nil { - source["encoder"] = *hl.encoder - } - if hl.requireFieldMatch != nil { - source["require_field_match"] = *hl.requireFieldMatch - } - if hl.boundaryMaxScan != nil { - source["boundary_max_scan"] = *hl.boundaryMaxScan - } - if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 { - source["boundary_chars"] = hl.boundaryChars - } - if hl.highlighterType != nil { - source["type"] = *hl.highlighterType - } - if hl.fragmenter != nil { - source["fragmenter"] = *hl.fragmenter - } - if hl.highlightQuery != nil { - src, err := hl.highlightQuery.Source() - if err != nil { - return nil, err - } - source["highlight_query"] = src - } - if hl.noMatchSize != nil { - source["no_match_size"] = *hl.noMatchSize - } - if hl.phraseLimit != nil { - source["phrase_limit"] = *hl.phraseLimit - } - if hl.options != nil && len(hl.options) > 0 { - source["options"] = hl.options - } - if hl.forceSource != nil { - source["force_source"] = *hl.forceSource - } - - if hl.fields != nil && len(hl.fields) > 0 { - if hl.useExplicitFieldOrder { - // Use a slice for the fields - fields := make([]map[string]interface{}, 0) - for _, field := range hl.fields { - src, err := field.Source() - if err != nil { - return nil, err - } - fmap := make(map[string]interface{}) - fmap[field.Name] = src - fields = append(fields, fmap) - } - source["fields"] = fields - } else { - // Use a map for the fields - fields := make(map[string]interface{}, 0) - for _, field := range hl.fields { - src, err := field.Source() - if err != nil { - return nil, err - } - fields[field.Name] = src - } - source["fields"] = fields - } - } - - return source, nil -} - -// HighlighterField specifies a highlighted field. 
-type HighlighterField struct { - Name string - - preTags []string - postTags []string - fragmentSize int - fragmentOffset int - numOfFragments int - highlightFilter *bool - order *string - requireFieldMatch *bool - boundaryMaxScan int - boundaryChars []rune - highlighterType *string - fragmenter *string - highlightQuery Query - noMatchSize *int - matchedFields []string - phraseLimit *int - options map[string]interface{} - forceSource *bool - - /* - Name string - preTags []string - postTags []string - fragmentSize int - numOfFragments int - fragmentOffset int - highlightFilter *bool - order string - requireFieldMatch *bool - boundaryMaxScan int - boundaryChars []rune - highlighterType string - fragmenter string - highlightQuery Query - noMatchSize *int - matchedFields []string - options map[string]interface{} - forceSource *bool - */ -} - -func NewHighlighterField(name string) *HighlighterField { - return &HighlighterField{ - Name: name, - preTags: make([]string, 0), - postTags: make([]string, 0), - fragmentSize: -1, - fragmentOffset: -1, - numOfFragments: -1, - boundaryMaxScan: -1, - boundaryChars: make([]rune, 0), - matchedFields: make([]string, 0), - options: make(map[string]interface{}), - } -} - -func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField { - f.preTags = append(f.preTags, preTags...) - return f -} - -func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField { - f.postTags = append(f.postTags, postTags...) - return f -} - -func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField { - f.fragmentSize = fragmentSize - return f -} - -func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField { - f.fragmentOffset = fragmentOffset - return f -} - -func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField { - f.numOfFragments = numOfFragments - return f -} - -func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField { - f.highlightFilter = &highlightFilter - return f -} - -func (f *HighlighterField) Order(order string) *HighlighterField { - f.order = &order - return f -} - -func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField { - f.requireFieldMatch = &requireFieldMatch - return f -} - -func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField { - f.boundaryMaxScan = boundaryMaxScan - return f -} - -func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField { - f.boundaryChars = append(f.boundaryChars, boundaryChars...) - return f -} - -func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField { - f.highlighterType = &highlighterType - return f -} - -func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField { - f.fragmenter = &fragmenter - return f -} - -func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField { - f.highlightQuery = highlightQuery - return f -} - -func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField { - f.noMatchSize = &noMatchSize - return f -} - -func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField { - f.options = options - return f -} - -func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField { - f.matchedFields = append(f.matchedFields, matchedFields...) 
- return f -} - -func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField { - f.phraseLimit = &phraseLimit - return f -} - -func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField { - f.forceSource = &forceSource - return f -} - -func (f *HighlighterField) Source() (interface{}, error) { - source := make(map[string]interface{}) - - if f.preTags != nil && len(f.preTags) > 0 { - source["pre_tags"] = f.preTags - } - if f.postTags != nil && len(f.postTags) > 0 { - source["post_tags"] = f.postTags - } - if f.fragmentSize != -1 { - source["fragment_size"] = f.fragmentSize - } - if f.numOfFragments != -1 { - source["number_of_fragments"] = f.numOfFragments - } - if f.fragmentOffset != -1 { - source["fragment_offset"] = f.fragmentOffset - } - if f.highlightFilter != nil { - source["highlight_filter"] = *f.highlightFilter - } - if f.order != nil { - source["order"] = *f.order - } - if f.requireFieldMatch != nil { - source["require_field_match"] = *f.requireFieldMatch - } - if f.boundaryMaxScan != -1 { - source["boundary_max_scan"] = f.boundaryMaxScan - } - if f.boundaryChars != nil && len(f.boundaryChars) > 0 { - source["boundary_chars"] = f.boundaryChars - } - if f.highlighterType != nil { - source["type"] = *f.highlighterType - } - if f.fragmenter != nil { - source["fragmenter"] = *f.fragmenter - } - if f.highlightQuery != nil { - src, err := f.highlightQuery.Source() - if err != nil { - return nil, err - } - source["highlight_query"] = src - } - if f.noMatchSize != nil { - source["no_match_size"] = *f.noMatchSize - } - if f.matchedFields != nil && len(f.matchedFields) > 0 { - source["matched_fields"] = f.matchedFields - } - if f.phraseLimit != nil { - source["phrase_limit"] = *f.phraseLimit - } - if f.options != nil && len(f.options) > 0 { - source["options"] = f.options - } - if f.forceSource != nil { - source["force_source"] = *f.forceSource - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/highlight_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/highlight_test.go deleted file mode 100644 index be5cd963e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/highlight_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
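Aside: the Source methods above are consumed by marshaling their return value into the JSON that sits under "highlight" in a search request body. A short sketch of that, assuming the vendored import path and placeholder field names:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	// Build a highlighter for one field with custom wrapping tags.
	hl := elastic.NewHighlight().
		Field("message").
		PreTags("<em>").
		PostTags("</em>")

	// Render the map that would be embedded under "highlight".
	src, err := hl.Source()
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		log.Fatal(err)
	}
	// Prints something like:
	// {"fields":{"message":{}},"post_tags":["</em>"],"pre_tags":["<em>"]}
	fmt.Println(string(data))
}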
- -package elastic - -import ( - "encoding/json" - _ "net/http" - "testing" -) - -func TestHighlighterField(t *testing.T) { - field := NewHighlighterField("grade") - src, err := field.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHighlighterFieldWithOptions(t *testing.T) { - field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1) - src, err := field.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"fragment_size":2,"number_of_fragments":1}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHighlightWithStringField(t *testing.T) { - builder := NewHighlight().Field("grade") - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"fields":{"grade":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHighlightWithFields(t *testing.T) { - gradeField := NewHighlighterField("grade") - builder := NewHighlight().Fields(gradeField) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"fields":{"grade":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHighlightWithMultipleFields(t *testing.T) { - gradeField := NewHighlighterField("grade") - colorField := NewHighlighterField("color") - builder := NewHighlight().Fields(gradeField, colorField) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"fields":{"color":{},"grade":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHighlighterWithExplicitFieldOrder(t *testing.T) { - gradeField := NewHighlighterField("grade").FragmentSize(2) - colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1) - builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHighlightWithTermQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = 
client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Specify highlighter - hl := NewHighlight() - hl = hl.Fields(NewHighlighterField("message")) - hl = hl.PreTags("").PostTags("") - - // Match all should return all documents - query := NewPrefixQuery("message", "golang") - searchResult, err := client.Search(). - Index(testIndexName). - Highlight(hl). - Query(query). - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Fatalf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 1 { - t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 1 { - t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits)) - } - - hit := searchResult.Hits.Hits[0] - var tw tweet - if err := json.Unmarshal(*hit.Source, &tw); err != nil { - t.Fatal(err) - } - if hit.Highlight == nil || len(hit.Highlight) == 0 { - t.Fatal("expected hit to have a highlight; got nil") - } - if hl, found := hit.Highlight["message"]; found { - if len(hl) != 1 { - t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl)) - } - expected := "Welcome to Golang and Elasticsearch." - if hl[0] != expected { - t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0]) - } - } else { - t.Fatal("expected to have a highlight on field \"message\"; got none") - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/index.go b/services/templeton/vendor/src/github.com/olivere/elastic/index.go deleted file mode 100644 index bdaba0560..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/index.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndexService adds or updates a typed JSON document in a specified index, -// making it searchable. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html -// for details. -type IndexService struct { - client *Client - pretty bool - id string - index string - typ string - parent string - replication string - routing string - timeout string - timestamp string - ttl string - version interface{} - opType string - versionType string - refresh *bool - consistency string - bodyJson interface{} - bodyString string -} - -// NewIndexService creates a new IndexService. -func NewIndexService(client *Client) *IndexService { - return &IndexService{ - client: client, - } -} - -// Id is the document ID. -func (s *IndexService) Id(id string) *IndexService { - s.id = id - return s -} - -// Index is the name of the index. -func (s *IndexService) Index(index string) *IndexService { - s.index = index - return s -} - -// Type is the type of the document. -func (s *IndexService) Type(typ string) *IndexService { - s.typ = typ - return s -} - -// Consistency is an explicit write consistency setting for the operation. 
-func (s *IndexService) Consistency(consistency string) *IndexService { - s.consistency = consistency - return s -} - -// Refresh the index after performing the operation. -func (s *IndexService) Refresh(refresh bool) *IndexService { - s.refresh = &refresh - return s -} - -// Ttl is an expiration time for the document. -func (s *IndexService) Ttl(ttl string) *IndexService { - s.ttl = ttl - return s -} - -// TTL is an expiration time for the document (alias for Ttl). -func (s *IndexService) TTL(ttl string) *IndexService { - s.ttl = ttl - return s -} - -// Version is an explicit version number for concurrency control. -func (s *IndexService) Version(version interface{}) *IndexService { - s.version = version - return s -} - -// OpType is an explicit operation type, i.e. "create" or "index" (default). -func (s *IndexService) OpType(opType string) *IndexService { - s.opType = opType - return s -} - -// Parent is the ID of the parent document. -func (s *IndexService) Parent(parent string) *IndexService { - s.parent = parent - return s -} - -// Replication is a specific replication type. -func (s *IndexService) Replication(replication string) *IndexService { - s.replication = replication - return s -} - -// Routing is a specific routing value. -func (s *IndexService) Routing(routing string) *IndexService { - s.routing = routing - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndexService) Timeout(timeout string) *IndexService { - s.timeout = timeout - return s -} - -// Timestamp is an explicit timestamp for the document. -func (s *IndexService) Timestamp(timestamp string) *IndexService { - s.timestamp = timestamp - return s -} - -// VersionType is a specific version type. -func (s *IndexService) VersionType(versionType string) *IndexService { - s.versionType = versionType - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndexService) Pretty(pretty bool) *IndexService { - s.pretty = pretty - return s -} - -// BodyJson is the document as a serializable JSON interface. -func (s *IndexService) BodyJson(body interface{}) *IndexService { - s.bodyJson = body - return s -} - -// BodyString is the document encoded as a string. -func (s *IndexService) BodyString(body string) *IndexService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndexService) buildURL() (string, string, url.Values, error) { - var err error - var method, path string - - if s.id != "" { - // Create document with manual id - method = "PUT" - path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "id": s.id, - "index": s.index, - "type": s.typ, - }) - } else { - // Automatic ID generation - // See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation - method = "POST" - path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{ - "index": s.index, - "type": s.typ, - }) - } - if err != nil { - return "", "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.consistency != "" { - params.Set("consistency", s.consistency) - } - if s.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *s.refresh)) - } - if s.opType != "" { - params.Set("op_type", s.opType) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.replication != "" { - params.Set("replication", s.replication) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.timestamp != "" { - params.Set("timestamp", s.timestamp) - } - if s.ttl != "" { - params.Set("ttl", s.ttl) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - return method, path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndexService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndexService) Do() (*IndexResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - method, path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(method, path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndexResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndexResponse is the result of indexing a document in Elasticsearch. -type IndexResponse struct { - // TODO _shards { total, failed, successful } - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id"` - Version int `json:"_version"` - Created bool `json:"created"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/index_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/index_test.go deleted file mode 100644 index 01722b3e3..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/index_test.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
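Aside: buildURL above selects PUT /{index}/{type}/{id} when an explicit id is set and POST /{index}/{type}/ otherwise, letting Elasticsearch generate the id. A brief usage sketch under the assumption of a local node; the index and type names and the tweet struct are placeholders:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

type tweet struct {
	User    string `json:"user"`
	Message string `json:"message"`
}

func main() {
	client, err := elastic.NewClient() // assumes a local Elasticsearch node
	if err != nil {
		log.Fatal(err)
	}

	doc := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}

	// Explicit id -> PUT /twitter/tweet/1
	res, err := client.Index().Index("twitter").Type("tweet").Id("1").BodyJson(&doc).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("indexed with explicit id:", res.Id)

	// No id -> POST /twitter/tweet/ and the server picks the id
	res, err = client.Index().Index("twitter").Type("tweet").BodyJson(&doc).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("indexed with generated id:", res.Id)
}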
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestIndexLifecycle(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - - // Add a document - indexResult, err := client.Index(). - Index(testIndexName). - Type("tweet"). - Id("1"). - BodyJson(&tweet1). - Do() - if err != nil { - t.Fatal(err) - } - if indexResult == nil { - t.Errorf("expected result to be != nil; got: %v", indexResult) - } - - // Exists - exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() - if err != nil { - t.Fatal(err) - } - if !exists { - t.Errorf("expected exists %v; got %v", true, exists) - } - - // Get document - getResult, err := client.Get(). - Index(testIndexName). - Type("tweet"). - Id("1"). - Do() - if err != nil { - t.Fatal(err) - } - if getResult.Index != testIndexName { - t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index) - } - if getResult.Type != "tweet" { - t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type) - } - if getResult.Id != "1" { - t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id) - } - if getResult.Source == nil { - t.Errorf("expected GetResult.Source to be != nil; got nil") - } - - // Decode the Source field - var tweetGot tweet - err = json.Unmarshal(*getResult.Source, &tweetGot) - if err != nil { - t.Fatal(err) - } - if tweetGot.User != tweet1.User { - t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User) - } - if tweetGot.Message != tweet1.Message { - t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message) - } - - // Delete document again - deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do() - if err != nil { - t.Fatal(err) - } - if deleteResult == nil { - t.Errorf("expected result to be != nil; got: %v", deleteResult) - } - - // Exists - exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do() - if err != nil { - t.Fatal(err) - } - if exists { - t.Errorf("expected exists %v; got %v", false, exists) - } -} - -func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - - // Add a document - indexResult, err := client.Index(). - Index(testIndexName). - Type("tweet"). - BodyJson(&tweet1). - Do() - if err != nil { - t.Fatal(err) - } - if indexResult == nil { - t.Errorf("expected result to be != nil; got: %v", indexResult) - } - if indexResult.Id == "" { - t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id) - } - id := indexResult.Id - - // Exists - exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do() - if err != nil { - t.Fatal(err) - } - if !exists { - t.Errorf("expected exists %v; got %v", true, exists) - } - - // Get document - getResult, err := client.Get(). - Index(testIndexName). - Type("tweet"). - Id(id). 
- Do()
- if err != nil {
- t.Fatal(err)
- }
- if getResult.Index != testIndexName {
- t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
- }
- if getResult.Type != "tweet" {
- t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
- }
- if getResult.Id != id {
- t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
- }
- if getResult.Source == nil {
- t.Errorf("expected GetResult.Source to be != nil; got nil")
- }
-
- // Decode the Source field
- var tweetGot tweet
- err = json.Unmarshal(*getResult.Source, &tweetGot)
- if err != nil {
- t.Fatal(err)
- }
- if tweetGot.User != tweet1.User {
- t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
- }
- if tweetGot.Message != tweet1.Message {
- t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
- }
-
- // Delete document again
- deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do()
- if err != nil {
- t.Fatal(err)
- }
- if deleteResult == nil {
- t.Errorf("expected result to be != nil; got: %v", deleteResult)
- }
-
- // Exists
- exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
- if err != nil {
- t.Fatal(err)
- }
- if exists {
- t.Errorf("expected exists %v; got %v", false, exists)
- }
-}
-
-func TestIndexValidate(t *testing.T) {
- client := setupTestClient(t)
-
- tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-
- // No index name -> fail with error
- res, err := NewIndexService(client).Type("tweet").Id("1").BodyJson(&tweet).Do()
- if err == nil {
- t.Fatalf("expected Index to fail without index name")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-
- // No type -> fail with error
- res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do()
- if err == nil {
- t.Fatalf("expected Index to fail without type")
- }
- if res != nil {
- t.Fatalf("expected result to be == nil; got: %v", res)
- }
-}
-
-func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
- // TODO: Find out how to make these tests robust
- t.Skip("test fails regularly with 409 (Conflict): " +
- "IndexPrimaryShardNotAllocatedException[[elastic-test] " +
- "primary not allocated post api... 
skipping") - - client := setupTestClient(t) - - // Create index - createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() - if err != nil { - t.Fatal(err) - } - if createIndex == nil { - t.Fatalf("expected response; got: %v", createIndex) - } - if !createIndex.Acknowledged { - t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged) - } - - // Exists - indexExists, err := client.IndexExists(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !indexExists { - t.Fatalf("expected index exists=%v; got %v", true, indexExists) - } - - // Flush - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Close index - closeIndex, err := client.CloseIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if closeIndex == nil { - t.Fatalf("expected response; got: %v", closeIndex) - } - if !closeIndex.Acknowledged { - t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged) - } - - // Open index - openIndex, err := client.OpenIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if openIndex == nil { - t.Fatalf("expected response; got: %v", openIndex) - } - if !openIndex.Acknowledged { - t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged) - } - - // Flush - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Delete index - deleteIndex, err := client.DeleteIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if deleteIndex == nil { - t.Fatalf("expected response; got: %v", deleteIndex) - } - if !deleteIndex.Acknowledged { - t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_close.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_close.go deleted file mode 100644 index ad344cb26..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_close.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesCloseService closes an index. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html -// for details. -type IndicesCloseService struct { - client *Client - pretty bool - index string - timeout string - masterTimeout string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string -} - -// NewIndicesCloseService creates and initializes a new IndicesCloseService. -func NewIndicesCloseService(client *Client) *IndicesCloseService { - return &IndicesCloseService{client: client} -} - -// Index is the name of the index to close. -func (s *IndicesCloseService) Index(index string) *IndicesCloseService { - s.index = index - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. 
-func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService { - s.masterTimeout = masterTimeout - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). -func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService { - s.expandWildcards = expandWildcards - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesCloseService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/_close", map[string]string{ - "index": s.index, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesCloseService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesCloseService) Do() (*IndicesCloseResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("POST", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesCloseResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesCloseResponse is the response of IndicesCloseService.Do. -type IndicesCloseResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_close_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_close_test.go deleted file mode 100644 index 7293bb1c4..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_close_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -// TODO(oe): Find out why this test fails on Travis CI. -/* -func TestIndicesOpenAndClose(t *testing.T) { - client := setupTestClient(t) - - // Create index - createIndex, err := client.CreateIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !createIndex.Acknowledged { - t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) - } - defer func() { - // Delete index - deleteIndex, err := client.DeleteIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !deleteIndex.Acknowledged { - t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) - } - }() - - waitForYellow := func() { - // Wait for status yellow - res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do() - if err != nil { - t.Fatal(err) - } - if res != nil && res.TimedOut { - t.Fatalf("cluster time out waiting for status %q", "yellow") - } - } - - // Wait for cluster - waitForYellow() - - // Close index - cresp, err := client.CloseIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !cresp.Acknowledged { - t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName) - } - - // Wait for cluster - waitForYellow() - - // Open index again - oresp, err := client.OpenIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !oresp.Acknowledged { - t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName) - } -} -*/ - -func TestIndicesCloseValidate(t *testing.T) { - client := setupTestClient(t) - - // No index name -> fail with error - res, err := NewIndicesCloseService(client).Do() - if err == nil { - t.Fatalf("expected IndicesClose to fail without index name") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_create.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_create.go deleted file mode 100644 index 1e98447ea..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_create.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "errors" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesCreateService creates a new index. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html -// for details. -type IndicesCreateService struct { - client *Client - pretty bool - index string - timeout string - masterTimeout string - bodyJson interface{} - bodyString string -} - -// NewIndicesCreateService returns a new IndicesCreateService. -func NewIndicesCreateService(client *Client) *IndicesCreateService { - return &IndicesCreateService{client: client} -} - -// Index is the name of the index to create. -func (b *IndicesCreateService) Index(index string) *IndicesCreateService { - b.index = index - return b -} - -// Timeout the explicit operation timeout, e.g. "5s". -func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. 
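
As a reading aid for the IndicesCloseService removed above (and the disabled open/close test), here is a sketch of a close-and-reopen round-trip. The index name and client setup are illustrative assumptions.

```go
package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient() // assumes a local cluster at 127.0.0.1:9200
	if err != nil {
		log.Fatal(err)
	}

	const index = "tweets" // illustrative index name

	// Close the index; the service POSTs to /{index}/_close.
	closeRes, err := client.CloseIndex(index).Do()
	if err != nil {
		log.Fatal(err)
	}
	if !closeRes.Acknowledged {
		log.Printf("close of %q not acknowledged", index)
	}

	// Reopen it afterwards via /{index}/_open.
	openRes, err := client.OpenIndex(index).Do()
	if err != nil {
		log.Fatal(err)
	}
	if !openRes.Acknowledged {
		log.Printf("open of %q not acknowledged", index)
	}
}
```
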
-func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Body specifies the configuration of the index as a string.
-// It is an alias for BodyString.
-func (b *IndicesCreateService) Body(body string) *IndicesCreateService {
- b.bodyString = body
- return b
-}
-
-// BodyString specifies the configuration of the index as a string.
-func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService {
- b.bodyString = body
- return b
-}
-
-// BodyJson specifies the configuration of the index. The interface{} will
-// be serialized as a JSON document, so use a map[string]interface{}.
-func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
- b.bodyJson = body
- return b
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
- b.pretty = pretty
- return b
-}
-
-// Do executes the operation.
-func (b *IndicesCreateService) Do() (*IndicesCreateResult, error) {
- if b.index == "" {
- return nil, errors.New("missing index name")
- }
-
- // Build URL
- path, err := uritemplates.Expand("/{index}", map[string]string{
- "index": b.index,
- })
- if err != nil {
- return nil, err
- }
-
- params := make(url.Values)
- if b.pretty {
- params.Set("pretty", "1")
- }
- if b.masterTimeout != "" {
- params.Set("master_timeout", b.masterTimeout)
- }
- if b.timeout != "" {
- params.Set("timeout", b.timeout)
- }
-
- // Setup HTTP request body
- var body interface{}
- if b.bodyJson != nil {
- body = b.bodyJson
- } else {
- body = b.bodyString
- }
-
- // Get response
- res, err := b.client.PerformRequest("PUT", path, params, body)
- if err != nil {
- return nil, err
- }
-
- ret := new(IndicesCreateResult)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Result of a create index request.
-
-// IndicesCreateResult is the outcome of creating a new index.
-type IndicesCreateResult struct {
- Acknowledged bool `json:"acknowledged"`
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_create_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_create_test.go
deleted file mode 100644
index b3723950a..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_create_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
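
A hedged sketch of the IndicesCreateService API deleted above: BodyJson (or Body/BodyString) carries the index configuration, and Do issues PUT /{index}. The index name and settings shown are illustrative, not taken from this patch.

```go
package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient() // assumes a local cluster; adjust as needed
	if err != nil {
		log.Fatal(err)
	}

	// The body is serialized as the index configuration document.
	body := map[string]interface{}{
		"settings": map[string]interface{}{
			"number_of_shards":   1,
			"number_of_replicas": 0,
		},
	}

	res, err := client.CreateIndex("tweets").BodyJson(body).Do()
	if err != nil {
		log.Fatal(err)
	}
	if !res.Acknowledged {
		log.Println("index creation was not acknowledged")
	}
}
```
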
- -package elastic - -import "testing" - -func TestIndicesLifecycle(t *testing.T) { - client := setupTestClient(t) - - // Create index - createIndex, err := client.CreateIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !createIndex.Acknowledged { - t.Errorf("expected IndicesCreateResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) - } - - // Check if index exists - indexExists, err := client.IndexExists(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !indexExists { - t.Fatalf("index %s should exist, but doesn't\n", testIndexName) - } - - // Delete index - deleteIndex, err := client.DeleteIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !deleteIndex.Acknowledged { - t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) - } - - // Check if index exists - indexExists, err = client.IndexExists(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if indexExists { - t.Fatalf("index %s should not exist, but does\n", testIndexName) - } -} - -func TestIndicesCreateValidate(t *testing.T) { - client := setupTestClient(t) - - // No index name -> fail with error - res, err := NewIndicesCreateService(client).Body(testMapping).Do() - if err == nil { - t.Fatalf("expected IndicesCreate to fail without index name") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete.go deleted file mode 100644 index e2582dc6f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesDeleteService allows to delete existing indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html -// for details. -type IndicesDeleteService struct { - client *Client - pretty bool - index []string - timeout string - masterTimeout string -} - -// NewIndicesDeleteService creates and initializes a new IndicesDeleteService. -func NewIndicesDeleteService(client *Client) *IndicesDeleteService { - return &IndicesDeleteService{ - client: client, - index: make([]string, 0), - } -} - -// Index adds the list of indices to delete. -// Use `_all` or `*` string to delete all indices. -func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService { - s.index = index - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService { - s.masterTimeout = masterTimeout - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndicesDeleteService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}", map[string]string{ - "index": strings.Join(s.index, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesDeleteService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesDeleteService) Do() (*IndicesDeleteResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("DELETE", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesDeleteResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a delete index request. - -// IndicesDeleteResponse is the response of IndicesDeleteService.Do. -type IndicesDeleteResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_template.go deleted file mode 100644 index 2c62a06cd..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_template.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesDeleteTemplateService deletes index templates. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. -type IndicesDeleteTemplateService struct { - client *Client - pretty bool - name string - timeout string - masterTimeout string -} - -// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService. -func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService { - return &IndicesDeleteTemplateService{ - client: client, - } -} - -// Name is the name of the template. -func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService { - s.name = name - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService { - s.masterTimeout = masterTimeout - return s -} - -// Pretty indicates that the JSON response be indented and human readable. 
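
For reference, a sketch of deleting indices with the IndicesDeleteService shown above; its Index setter takes a []string, so several indices (or `_all`) can be removed in one call. The index names are illustrative assumptions.

```go
package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient() // assumes a local cluster
	if err != nil {
		log.Fatal(err)
	}

	// Index accepts a slice, so multiple indices map to DELETE /idx1,idx2.
	res, err := elastic.NewIndicesDeleteService(client).
		Index([]string{"tweets-2015", "tweets-2016"}).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	if !res.Acknowledged {
		log.Println("index deletion was not acknowledged")
	}
}
```
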
-func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_template/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesDeleteTemplateService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("DELETE", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesDeleteTemplateResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do. -type IndicesDeleteTemplateResponse struct { - Acknowledged bool `json:"acknowledged,omitempty"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_test.go deleted file mode 100644 index d84edb8de..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestIndicesDeleteValidate(t *testing.T) { - client := setupTestClient(t) - - // No index name -> fail with error - res, err := NewIndicesDeleteService(client).Do() - if err == nil { - t.Fatalf("expected IndicesDelete to fail without index name") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer.go deleted file mode 100644 index 79aa4c2d5..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesDeleteWarmerService allows to delete a warmer. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. 
-type IndicesDeleteWarmerService struct {
- client *Client
- pretty bool
- index []string
- name []string
- masterTimeout string
-}
-
-// NewIndicesDeleteWarmerService creates a new IndicesDeleteWarmerService.
-func NewIndicesDeleteWarmerService(client *Client) *IndicesDeleteWarmerService {
- return &IndicesDeleteWarmerService{
- client: client,
- index: make([]string, 0),
- name: make([]string, 0),
- }
-}
-
-// Index is a list of index names the warmers should be deleted from
-// (supports wildcards); use `_all` or omit to delete warmers from all indices.
-func (s *IndicesDeleteWarmerService) Index(indices ...string) *IndicesDeleteWarmerService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Name is a list of warmer names to delete (supports wildcards);
-// use `_all` to delete all warmers in the specified indices.
-func (s *IndicesDeleteWarmerService) Name(name ...string) *IndicesDeleteWarmerService {
- s.name = append(s.name, name...)
- return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesDeleteWarmerService) MasterTimeout(masterTimeout string) *IndicesDeleteWarmerService {
- s.masterTimeout = masterTimeout
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *IndicesDeleteWarmerService) Pretty(pretty bool) *IndicesDeleteWarmerService {
- s.pretty = pretty
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesDeleteWarmerService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{
- "index": strings.Join(s.index, ","),
- "name": strings.Join(s.name, ","),
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", "1")
- }
- if s.masterTimeout != "" {
- params.Set("master_timeout", s.masterTimeout)
- }
- if len(s.name) > 0 {
- params.Set("name", strings.Join(s.name, ","))
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesDeleteWarmerService) Validate() error {
- var invalid []string
- if len(s.index) == 0 {
- invalid = append(invalid, "Index")
- }
- if len(s.name) == 0 {
- invalid = append(invalid, "Name")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *IndicesDeleteWarmerService) Do() (*DeleteWarmerResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest("DELETE", path, params, nil)
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(DeleteWarmerResponse)
- if err := json.Unmarshal(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// DeleteWarmerResponse is the response of IndicesDeleteWarmerService.Do.
-type DeleteWarmerResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer_test.go deleted file mode 100644 index 3d811ea59..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_delete_warmer_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestDeleteWarmerBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Names []string - Expected string - }{ - { - []string{"test"}, - []string{"warmer_1"}, - "/test/_warmer/warmer_1", - }, - { - []string{"*"}, - []string{"warmer_1"}, - "/%2A/_warmer/warmer_1", - }, - { - []string{"_all"}, - []string{"warmer_1"}, - "/_all/_warmer/warmer_1", - }, - { - []string{"index-1", "index-2"}, - []string{"warmer_1", "warmer_2"}, - "/index-1%2Cindex-2/_warmer/warmer_1%2Cwarmer_2", - }, - } - - for _, test := range tests { - path, _, err := client.DeleteWarmer().Index(test.Indices...).Name(test.Names...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists.go deleted file mode 100644 index 92f9974f2..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesExistsService checks if an index or indices exist or not. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html -// for details. -type IndicesExistsService struct { - client *Client - pretty bool - index []string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - local *bool -} - -// NewIndicesExistsService creates and initializes a new IndicesExistsService. -func NewIndicesExistsService(client *Client) *IndicesExistsService { - return &IndicesExistsService{ - client: client, - index: make([]string, 0), - } -} - -// Index is a list of one or more indices to check. -func (s *IndicesExistsService) Index(index []string) *IndicesExistsService { - s.index = index - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. (This includes `_all` string or -// when no indices have been specified). -func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. 
-func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService { - s.expandWildcards = expandWildcards - return s -} - -// Local, when set, returns local information and does not retrieve the state -// from master node (default: false). -func (s *IndicesExistsService) Local(local bool) *IndicesExistsService { - s.local = &local - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesExistsService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}", map[string]string{ - "index": strings.Join(s.index, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesExistsService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesExistsService) Do() (bool, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return false, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return false, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) - if err != nil { - return false, err - } - - // Return operation response - switch res.StatusCode { - case http.StatusOK: - return true, nil - case http.StatusNotFound: - return false, nil - default: - return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template.go deleted file mode 100644 index 7587a8786..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "net/http" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesExistsTemplateService checks if a given template exists. -// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists -// for documentation. 
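
A sketch of the exists check implemented by the IndicesExistsService Do method above: the service sends HEAD /{index} and maps HTTP 200 to true and 404 to false rather than to an error. The index name is an illustrative assumption.

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient() // assumes a local cluster
	if err != nil {
		log.Fatal(err)
	}

	// HEAD /tweets: 200 -> (true, nil), 404 -> (false, nil).
	exists, err := client.IndexExists("tweets").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("index exists:", exists)
}
```
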
-type IndicesExistsTemplateService struct { - client *Client - pretty bool - name string - local *bool -} - -// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService. -func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService { - return &IndicesExistsTemplateService{ - client: client, - } -} - -// Name is the name of the template. -func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService { - s.name = name - return s -} - -// Local indicates whether to return local information, i.e. do not retrieve -// the state from master node (default: false). -func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService { - s.local = &local - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_template/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesExistsTemplateService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesExistsTemplateService) Do() (bool, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return false, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return false, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) - if err != nil { - return false, err - } - - // Return operation response - switch res.StatusCode { - case http.StatusOK: - return true, nil - case http.StatusNotFound: - return false, nil - default: - return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template_test.go deleted file mode 100644 index 32fb82ad3..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_template_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
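
To complement the template-exists service above, a sketch that registers a template and then checks for it, mirroring the calls used by the test that follows. The template name and pattern are illustrative assumptions.

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient() // assumes a local cluster
	if err != nil {
		log.Fatal(err)
	}

	// Register an index template (name and pattern are illustrative).
	tmpl := `{"template": "tweets-*", "settings": {"number_of_shards": 1}}`
	putres, err := client.IndexPutTemplate("tweets-template").BodyString(tmpl).Do()
	if err != nil {
		log.Fatal(err)
	}
	if !putres.Acknowledged {
		log.Println("template registration was not acknowledged")
	}

	// HEAD /_template/tweets-template: 200 -> true, 404 -> false.
	exists, err := client.IndexTemplateExists("tweets-template").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("template exists:", exists)
}
```
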
- -package elastic - -import ( - "testing" -) - -func TestIndexExistsTemplate(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tmpl := `{ - "template":"elastic-test*", - "settings":{ - "number_of_shards":1, - "number_of_replicas":0 - }, - "mappings":{ - "tweet":{ - "properties":{ - "tags":{ - "type":"string" - }, - "location":{ - "type":"geo_point" - }, - "suggest_field":{ - "type":"completion", - "payloads":true - } - } - } - } -}` - putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if putres == nil { - t.Fatalf("expected response; got: %v", putres) - } - if !putres.Acknowledged { - t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged) - } - - // Always delete template - defer client.IndexDeleteTemplate("elastic-template").Do() - - // Check if template exists - exists, err := client.IndexTemplateExists("elastic-template").Do() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if !exists { - t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists) - } - - // Get template - getres, err := client.IndexGetTemplate("elastic-template").Do() - if err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if getres == nil { - t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_test.go deleted file mode 100644 index 8cb6f5fab..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestIndicesExistsWithoutIndex(t *testing.T) { - client := setupTestClient(t) - - // No index name -> fail with error - res, err := NewIndicesExistsService(client).Do() - if err == nil { - t.Fatalf("expected IndicesExists to fail without index name") - } - if res != false { - t.Fatalf("expected result to be false; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type.go deleted file mode 100644 index 631f773fe..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesExistsTypeService checks if one or more types exist in one or more indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html -// for details. -type IndicesExistsTypeService struct { - client *Client - pretty bool - typ []string - index []string - expandWildcards string - local *bool - ignoreUnavailable *bool - allowNoIndices *bool -} - -// NewIndicesExistsTypeService creates a new IndicesExistsTypeService. 
-func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService { - return &IndicesExistsTypeService{ - client: client, - index: make([]string, 0), - typ: make([]string, 0), - } -} - -// Index is a list of index names; use `_all` to check the types across all indices. -func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService { - s.index = append(s.index, indices...) - return s -} - -// Type is a list of document types to check. -func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService { - s.typ = append(s.typ, types...) - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// (This includes `_all` string or when no indices have been specified). -func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService { - s.expandWildcards = expandWildcards - return s -} - -// Local specifies whether to return local information, i.e. do not retrieve -// the state from master node (default: false). -func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService { - s.local = &local - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/{type}", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesExistsTypeService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(s.typ) == 0 { - invalid = append(invalid, "Type") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
-func (s *IndicesExistsTypeService) Do() (bool, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return false, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return false, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("HEAD", path, params, nil, 404) - if err != nil { - return false, err - } - - // Return operation response - switch res.StatusCode { - case http.StatusOK: - return true, nil - case http.StatusNotFound: - return false, nil - default: - return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type_test.go deleted file mode 100644 index 51721b125..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_exists_type_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestIndicesExistsTypeBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Types []string - Expected string - ExpectValidateFailure bool - }{ - { - []string{}, - []string{}, - "", - true, - }, - { - []string{"index1"}, - []string{}, - "", - true, - }, - { - []string{}, - []string{"type1"}, - "", - true, - }, - { - []string{"index1"}, - []string{"type1"}, - "/index1/type1", - false, - }, - { - []string{"index1", "index2"}, - []string{"type1"}, - "/index1%2Cindex2/type1", - false, - }, - { - []string{"index1", "index2"}, - []string{"type1", "type2"}, - "/index1%2Cindex2/type1%2Ctype2", - false, - }, - } - - for i, test := range tests { - err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate() - if err == nil && test.ExpectValidateFailure { - t.Errorf("case #%d: expected validate to fail", i+1) - continue - } - if err != nil && !test.ExpectValidateFailure { - t.Errorf("case #%d: expected validate to succeed", i+1) - continue - } - if !test.ExpectValidateFailure { - path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL() - if err != nil { - t.Fatalf("case #%d: %v", i+1, err) - } - if path != test.Expected { - t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) - } - } - } -} - -func TestIndicesExistsType(t *testing.T) { - client := setupTestClient(t) - - // Create index with tweet type - createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() - if err != nil { - t.Fatal(err) - } - if createIndex == nil { - t.Errorf("expected result to be != nil; got: %v", createIndex) - } - if !createIndex.Acknowledged { - t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged) - } - - // Check if type exists - exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do() - if err != nil { - t.Fatal(err) - } - if !exists { - t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName) - } - - // Delete index - deleteIndex, err := client.DeleteIndex(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if !deleteIndex.Acknowledged { - t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged) - } - - 
 // Check if type exists
- exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do()
- if err != nil {
- t.Fatal(err)
- }
- if exists {
- t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName)
- }
-}
-
-func TestIndicesExistsTypeValidate(t *testing.T) {
- client := setupTestClient(t)
-
- // No index name -> fail with error
- res, err := NewIndicesExistsTypeService(client).Do()
- if err == nil {
- t.Fatalf("expected IndicesExistsType to fail without index name")
- }
- if res != false {
- t.Fatalf("expected result to be false; got: %v", res)
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush.go
deleted file mode 100644
index 3d101f9bd..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-
- "gopkg.in/olivere/elastic.v3/uritemplates"
-)
-
-// IndicesFlushService flushes one or more indices. The flush process of an
-// index frees memory by writing data to the index storage and clearing the
-// internal transaction log.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
-// for details.
-type IndicesFlushService struct {
- client *Client
- pretty bool
- index []string
- force *bool
- waitIfOngoing *bool
- ignoreUnavailable *bool
- allowNoIndices *bool
- expandWildcards string
-}
-
-// NewIndicesFlushService creates a new IndicesFlushService.
-func NewIndicesFlushService(client *Client) *IndicesFlushService {
- return &IndicesFlushService{
- client: client,
- index: make([]string, 0),
- }
-}
-
-// Index is a list of index names; use `_all` or empty string for all indices.
-func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService {
- s.index = append(s.index, indices...)
- return s
-}
-
-// Force indicates whether a flush should be forced even if it is not
-// necessarily needed, i.e. if no changes will be committed to the index.
-// This is useful if transaction log IDs should be incremented even if
-// no uncommitted changes are present. (This setting can be considered internal.)
-func (s *IndicesFlushService) Force(force bool) *IndicesFlushService {
- s.force = &force
- return s
-}
-
-// WaitIfOngoing, if set to true, indicates that the flush operation will
-// block until the flush can be executed if another flush operation is
-// already executing. The default is false and will cause an exception
-// to be thrown on the shard level if another flush operation is already running.
-func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService {
- s.waitIfOngoing = &waitIfOngoing
- return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices expression
-// resolves into no concrete indices. (This includes `_all` string or when
-// no indices have been specified).
-func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards specifies whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService { - s.expandWildcards = expandWildcards - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesFlushService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_flush", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_flush" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.force != nil { - params.Set("force", fmt.Sprintf("%v", *s.force)) - } - if s.waitIfOngoing != nil { - params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesFlushService) Validate() error { - return nil -} - -// Do executes the service. -func (s *IndicesFlushService) Do() (*IndicesFlushResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("POST", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesFlushResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a flush request. - -type IndicesFlushResponse struct { - Shards shardsInfo `json:"_shards"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush_test.go deleted file mode 100644 index 4e30a000b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_flush_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
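
A sketch of the flush service removed above: with Index set, Do POSTs /{index}/_flush, and without it /_flush flushes all indices; WaitIfOngoing makes the call block instead of failing when another flush is already running. The index name is an illustrative assumption.

```go
package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient() // assumes a local cluster
	if err != nil {
		log.Fatal(err)
	}

	// POST /tweets/_flush; omit Index(...) to flush all indices via /_flush.
	_, err = client.Flush().
		Index("tweets").
		WaitIfOngoing(true). // block rather than fail if a flush is already running
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("flush completed")
}
```
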
- -package elastic - -import ( - "testing" -) - -func TestFlush(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Flush all indices - res, err := client.Flush().Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Errorf("expected res to be != nil; got: %v", res) - } -} - -func TestFlushBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Expected string - ExpectValidateFailure bool - }{ - { - []string{}, - "/_flush", - false, - }, - { - []string{"index1"}, - "/index1/_flush", - false, - }, - { - []string{"index1", "index2"}, - "/index1%2Cindex2/_flush", - false, - }, - } - - for i, test := range tests { - err := NewIndicesFlushService(client).Index(test.Indices...).Validate() - if err == nil && test.ExpectValidateFailure { - t.Errorf("case #%d: expected validate to fail", i+1) - continue - } - if err != nil && !test.ExpectValidateFailure { - t.Errorf("case #%d: expected validate to succeed", i+1) - continue - } - if !test.ExpectValidateFailure { - path, _, err := NewIndicesFlushService(client).Index(test.Indices...).buildURL() - if err != nil { - t.Fatalf("case #%d: %v", i+1, err) - } - if path != test.Expected { - t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) - } - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge.go deleted file mode 100644 index 6ca7b5b8f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesForcemergeService allows to force merging of one or more indices. -// The merge relates to the number of segments a Lucene index holds -// within each shard. The force merge operation allows to reduce the number -// of segments by merging them. -// -// See http://www.elastic.co/guide/en/elasticsearch/reference/2.1/indices-forcemerge.html -// for more information. -type IndicesForcemergeService struct { - client *Client - pretty bool - index []string - allowNoIndices *bool - expandWildcards string - flush *bool - ignoreUnavailable *bool - maxNumSegments interface{} - onlyExpungeDeletes *bool - operationThreading interface{} - waitForMerge *bool -} - -// NewIndicesForcemergeService creates a new IndicesForcemergeService. -func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService { - return &IndicesForcemergeService{ - client: client, - index: make([]string, 0), - } -} - -// Index is a list of index names; use `_all` or empty string to perform -// the operation on all indices. -func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService { - if s.index == nil { - s.index = make([]string, 0) - } - s.index = append(s.index, index...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// (This includes `_all` string or when no indices have been specified). 
-func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService { - s.expandWildcards = expandWildcards - return s -} - -// Flush specifies whether the index should be flushed after performing -// the operation (default: true). -func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService { - s.flush = &flush - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should -// be ignored when unavailable (missing or closed). -func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// MaxNumSegments specifies the number of segments the index should be -// merged into (default: dynamic). -func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService { - s.maxNumSegments = maxNumSegments - return s -} - -// OnlyExpungeDeletes specifies whether the operation should only expunge -// deleted documents. -func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService { - s.onlyExpungeDeletes = &onlyExpungeDeletes - return s -} - -func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService { - s.operationThreading = operationThreading - return s -} - -// WaitForMerge specifies whether the request should block until the -// merge process is finished (default: true). -func (s *IndicesForcemergeService) WaitForMerge(waitForMerge bool) *IndicesForcemergeService { - s.waitForMerge = &waitForMerge - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) { - var err error - var path string - - // Build URL - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_forcemerge" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flush != nil { - params.Set("flush", fmt.Sprintf("%v", *s.flush)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.maxNumSegments != nil { - params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments)) - } - if s.onlyExpungeDeletes != nil { - params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) - } - if s.operationThreading != nil { - params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading)) - } - if s.waitForMerge != nil { - params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. 
-func (s *IndicesForcemergeService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesForcemergeService) Do() (*IndicesForcemergeResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("POST", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesForcemergeResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do. -type IndicesForcemergeResponse struct { - Shards shardsInfo `json:"_shards"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge_test.go deleted file mode 100644 index c620654cc..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_forcemerge_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestIndicesForcemergeBuildURL(t *testing.T) { - client := setupTestClient(t) - - tests := []struct { - Indices []string - Expected string - }{ - { - []string{}, - "/_forcemerge", - }, - { - []string{"index1"}, - "/index1/_forcemerge", - }, - { - []string{"index1", "index2"}, - "/index1%2Cindex2/_forcemerge", - }, - } - - for i, test := range tests { - path, _, err := client.Forcemerge().Index(test.Indices...).buildURL() - if err != nil { - t.Errorf("case #%d: %v", i+1, err) - continue - } - if path != test.Expected { - t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path) - } - } -} - -func TestIndicesForcemerge(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - _, err := client.Forcemerge(testIndexName).MaxNumSegments(1).WaitForMerge(true).Do() - if err != nil { - t.Fatal(err) - } - /* - if !ok { - t.Fatalf("expected forcemerge to succeed; got: %v", ok) - } - */ -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get.go deleted file mode 100644 index 355184394..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesGetService retrieves information about one or more indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html -// for more details. -type IndicesGetService struct { - client *Client - pretty bool - index []string - feature []string - local *bool - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - flatSettings *bool - human *bool -} - -// NewIndicesGetService creates a new IndicesGetService. 
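The forcemerge service deleted above reduces the Lucene segment count per shard; the removed TestIndicesForcemerge shows the intended call chain. A sketch of the same pattern, under the same placeholder client and index assumptions as the flush example:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Merge "twitter" down to a single segment and block until the
	// merge finishes, mirroring the deleted test.
	res, err := client.Forcemerge("twitter").MaxNumSegments(1).WaitForMerge(true).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("forcemerge shard info: %+v\n", res.Shards)
}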
-func NewIndicesGetService(client *Client) *IndicesGetService { - return &IndicesGetService{ - client: client, - index: make([]string, 0), - feature: make([]string, 0), - } -} - -// Index is a list of index names. -func (s *IndicesGetService) Index(indices ...string) *IndicesGetService { - s.index = append(s.index, indices...) - return s -} - -// Feature is a list of features. -func (s *IndicesGetService) Feature(features ...string) *IndicesGetService { - s.feature = append(s.feature, features...) - return s -} - -// Local indicates whether to return local information, i.e. do not retrieve -// the state from master node (default: false). -func (s *IndicesGetService) Local(local bool) *IndicesGetService { - s.local = &local - return s -} - -// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false). -func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard expression -// resolves to no concrete indices (default: false). -func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether wildcard expressions should get -// expanded to open or closed indices (default: open). -func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService { - s.expandWildcards = expandWildcards - return s -} - -/* Disabled because serialization would fail in that case. */ -/* -// FlatSettings make the service return settings in flat format (default: false). -func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService { - s.flatSettings = &flatSettings - return s -} -*/ - -// Human indicates whether to return version and creation date values -// in human-readable format (default: false). -func (s *IndicesGetService) Human(human bool) *IndicesGetService { - s.human = &human - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndicesGetService) buildURL() (string, url.Values, error) { - var err error - var path string - var index []string - - if len(s.index) > 0 { - index = s.index - } else { - index = []string{"_all"} - } - - if len(s.feature) > 0 { - // Build URL - path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{ - "index": strings.Join(index, ","), - "feature": strings.Join(s.feature, ","), - }) - } else { - // Build URL - path, err = uritemplates.Expand("/{index}", map[string]string{ - "index": strings.Join(index, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.human != nil { - params.Set("human", fmt.Sprintf("%v", *s.human)) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesGetService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - var ret map[string]*IndicesGetResponse - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesGetResponse is part of the response of IndicesGetService.Do. -type IndicesGetResponse struct { - Aliases map[string]interface{} `json:"aliases"` - Mappings map[string]interface{} `json:"mappings"` - Settings map[string]interface{} `json:"settings"` - Warmers map[string]interface{} `json:"warmers"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases.go deleted file mode 100644 index 4de88c63d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
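The IndicesGetService deleted above returns aliases, mappings, settings, and warmers per index; note its Validate() rejects an empty index list, so at least one name (or "_all") must be given. A sketch with the same placeholder assumptions:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// IndexGet requires at least one index name; "twitter" is hypothetical.
	indices, err := client.IndexGet().Index("twitter").Do()
	if err != nil {
		log.Fatal(err)
	}
	for name, info := range indices {
		fmt.Printf("%s: %d mapping types, %d settings keys\n",
			name, len(info.Mappings), len(info.Settings))
	}
}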
- -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -type AliasesService struct { - client *Client - indices []string - pretty bool -} - -func NewAliasesService(client *Client) *AliasesService { - builder := &AliasesService{ - client: client, - indices: make([]string, 0), - } - return builder -} - -func (s *AliasesService) Pretty(pretty bool) *AliasesService { - s.pretty = pretty - return s -} - -func (s *AliasesService) Index(indices ...string) *AliasesService { - s.indices = append(s.indices, indices...) - return s -} - -func (s *AliasesService) Do() (*AliasesResult, error) { - var err error - - // Build url - path := "/" - - // Indices part - indexPart := make([]string, 0) - for _, index := range s.indices { - index, err = uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - path += strings.Join(indexPart, ",") - - // TODO Add types here - - // Search - path += "/_aliases" - - // Parameters - params := make(url.Values) - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - - // Get response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // { - // "indexName" : { - // "aliases" : { - // "alias1" : { }, - // "alias2" : { } - // } - // }, - // "indexName2" : { - // ... - // }, - // } - indexMap := make(map[string]interface{}) - if err := json.Unmarshal(res.Body, &indexMap); err != nil { - return nil, err - } - - // Each (indexName, _) - ret := &AliasesResult{ - Indices: make(map[string]indexResult), - } - for indexName, indexData := range indexMap { - indexOut, found := ret.Indices[indexName] - if !found { - indexOut = indexResult{Aliases: make([]aliasResult, 0)} - } - - // { "aliases" : { ... } } - indexDataMap, ok := indexData.(map[string]interface{}) - if ok { - aliasesData, ok := indexDataMap["aliases"].(map[string]interface{}) - if ok { - for aliasName, _ := range aliasesData { - aliasRes := aliasResult{AliasName: aliasName} - indexOut.Aliases = append(indexOut.Aliases, aliasRes) - } - } - } - - ret.Indices[indexName] = indexOut - } - - return ret, nil -} - -// -- Result of an alias request. - -type AliasesResult struct { - Indices map[string]indexResult -} - -type indexResult struct { - Aliases []aliasResult -} - -type aliasResult struct { - AliasName string -} - -func (ar AliasesResult) IndicesByAlias(aliasName string) []string { - indices := make([]string, 0) - - for indexName, indexInfo := range ar.Indices { - for _, aliasInfo := range indexInfo.Aliases { - if aliasInfo.AliasName == aliasName { - indices = append(indices, indexName) - } - } - } - - return indices -} - -func (ir indexResult) HasAlias(aliasName string) bool { - for _, alias := range ir.Aliases { - if alias.AliasName == aliasName { - return true - } - } - return false -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases_test.go deleted file mode 100644 index 6094f426e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_aliases_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
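The read-only AliasesService deleted above parses the per-index alias map and offers the reverse lookup IndicesByAlias, as the test below demonstrates. A sketch, again assuming the placeholder node and hypothetical index and alias names:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Fetch alias info for two hypothetical indices.
	result, err := client.Aliases().Index("twitter", "twitter-archive").Do()
	if err != nil {
		log.Fatal(err)
	}
	// Reverse lookup: which of the queried indices carry the "tweets" alias?
	for _, index := range result.IndicesByAlias("tweets") {
		fmt.Println("alias tweets points at", index)
	}
}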
- -package elastic - -import ( - "testing" -) - -func TestAliases(t *testing.T) { - var err error - - client := setupTestClientAndCreateIndex(t) - - // Some tweets - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."} - tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."} - - // Add tweets to first index - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - // Add tweets to second index - _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - // Flush - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Flush().Index(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - - // Alias should not yet exist - aliasesResult1, err := client.Aliases(). - Index(testIndexName, testIndexName2). - //Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if len(aliasesResult1.Indices) != 2 { - t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices)) - } - for indexName, indexDetails := range aliasesResult1.Indices { - if len(indexDetails.Aliases) != 0 { - t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases)) - } - } - - // Add both indices to a new alias - aliasCreate, err := client.Alias(). - Add(testIndexName, testAliasName). - Add(testIndexName2, testAliasName). - //Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if !aliasCreate.Acknowledged { - t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged) - } - - // Alias should now exist - aliasesResult2, err := client.Aliases(). - Index(testIndexName, testIndexName2). - //Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if len(aliasesResult2.Indices) != 2 { - t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices)) - } - for indexName, indexDetails := range aliasesResult2.Indices { - if len(indexDetails.Aliases) != 1 { - t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases)) - } - } - - // Check the reverse function: - indexInfo1, found := aliasesResult2.Indices[testIndexName] - if !found { - t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found) - } - aliasFound := indexInfo1.HasAlias(testAliasName) - if !aliasFound { - t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound) - } - - // Check the reverse function: - indexInfo2, found := aliasesResult2.Indices[testIndexName2] - if !found { - t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found) - } - aliasFound = indexInfo2.HasAlias(testAliasName) - if !aliasFound { - t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound) - } - - // Remove first index should remove two tweets, so should only yield 1 - aliasRemove1, err := client.Alias(). - Remove(testIndexName, testAliasName). - //Pretty(true). 
- Do() - if err != nil { - t.Fatal(err) - } - if !aliasRemove1.Acknowledged { - t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged) - } - - // Alias should now exist only for index 2 - aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if len(aliasesResult3.Indices) != 2 { - t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices)) - } - for indexName, indexDetails := range aliasesResult3.Indices { - if indexName == testIndexName { - if len(indexDetails.Aliases) != 0 { - t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases)) - } - } else if indexName == testIndexName2 { - if len(indexDetails.Aliases) != 1 { - t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases)) - } - } else { - t.Errorf("got index %s", indexName) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping.go deleted file mode 100644 index 5526cfcb8..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesGetMappingService retrieves the mapping definitions for an index or -// index/type. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html -// for details. -type IndicesGetMappingService struct { - client *Client - pretty bool - index []string - typ []string - local *bool - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string -} - -// NewGetMappingService is an alias for NewIndicesGetMappingService. -// Use NewIndicesGetMappingService. -func NewGetMappingService(client *Client) *IndicesGetMappingService { - return NewIndicesGetMappingService(client) -} - -// NewIndicesGetMappingService creates a new IndicesGetMappingService. -func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService { - return &IndicesGetMappingService{ - client: client, - index: make([]string, 0), - typ: make([]string, 0), - } -} - -// Index is a list of index names. -func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService { - s.index = append(s.index, indices...) - return s -} - -// Type is a list of document types. -func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService { - s.typ = append(s.typ, types...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// This includes `_all` string or when no indices have been specified. -func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both.. 
-func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService { - s.expandWildcards = expandWildcards - return s -} - -// Local indicates whether to return local information, do not retrieve -// the state from master node (default: false). -func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService { - s.local = &local - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) { - var index, typ []string - - if len(s.index) > 0 { - index = s.index - } else { - index = []string{"_all"} - } - - if len(s.typ) > 0 { - typ = s.typ - } else { - typ = []string{"_all"} - } - - // Build URL - path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ - "index": strings.Join(index, ","), - "type": strings.Join(typ, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesGetMappingService) Validate() error { - return nil -} - -// Do executes the operation. It returns mapping definitions for an index -// or index/type. -func (s *IndicesGetMappingService) Do() (map[string]interface{}, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - var ret map[string]interface{} - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping_test.go deleted file mode 100644 index ccfa27fed..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_mapping_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
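The IndicesGetMappingService deleted above defaults both the index and type lists to "_all" when left empty (see its buildURL), and its Do() hands back the raw mapping document as a map. A sketch with the usual placeholder assumptions:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Index "twitter" and type "tweet" are hypothetical; omitting either
	// falls back to _all.
	mappings, err := client.GetMapping().Index("twitter").Type("tweet").Do()
	if err != nil {
		log.Fatal(err)
	}
	for key := range mappings {
		fmt.Println("mapping entry for:", key)
	}
}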
- -package elastic - -import ( - "testing" -) - -func TestIndicesGetMappingURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Types []string - Expected string - }{ - { - []string{}, - []string{}, - "/_all/_mapping/_all", - }, - { - []string{}, - []string{"tweet"}, - "/_all/_mapping/tweet", - }, - { - []string{"twitter"}, - []string{"tweet"}, - "/twitter/_mapping/tweet", - }, - { - []string{"store-1", "store-2"}, - []string{"tweet", "user"}, - "/store-1%2Cstore-2/_mapping/tweet%2Cuser", - }, - } - - for _, test := range tests { - path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings.go deleted file mode 100644 index 4820cb656..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesGetSettingsService allows to retrieve settings of one -// or more indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html -// for more details. -type IndicesGetSettingsService struct { - client *Client - pretty bool - index []string - name []string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - flatSettings *bool - local *bool -} - -// NewIndicesGetSettingsService creates a new IndicesGetSettingsService. -func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService { - return &IndicesGetSettingsService{ - client: client, - index: make([]string, 0), - name: make([]string, 0), - } -} - -// Index is a list of index names; use `_all` or empty string to perform -// the operation on all indices. -func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService { - s.index = append(s.index, indices...) - return s -} - -// Name are the names of the settings that should be included. -func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService { - s.name = append(s.name, name...) - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should -// be ignored when unavailable (missing or closed). -func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// (This includes `_all` string or when no indices have been specified). -func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression -// to concrete indices that are open, closed or both. -// Options: open, closed, none, all. Default: open,closed. 
-func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService { - s.expandWildcards = expandWildcards - return s -} - -// FlatSettings indicates whether to return settings in flat format (default: false). -func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService { - s.flatSettings = &flatSettings - return s -} - -// Local indicates whether to return local information, do not retrieve -// the state from master node (default: false). -func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService { - s.local = &local - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) { - var err error - var path string - var index []string - - if len(s.index) > 0 { - index = s.index - } else { - index = []string{"_all"} - } - - if len(s.name) > 0 { - // Build URL - path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{ - "index": strings.Join(index, ","), - "name": strings.Join(s.name, ","), - }) - } else { - // Build URL - path, err = uritemplates.Expand("/{index}/_settings", map[string]string{ - "index": strings.Join(index, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesGetSettingsService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - var ret map[string]*IndicesGetSettingsResponse - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do. -type IndicesGetSettingsResponse struct { - Settings map[string]interface{} `json:"settings"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings_test.go deleted file mode 100644 index f53512d53..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_settings_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestIndexGetSettingsURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Names []string - Expected string - }{ - { - []string{}, - []string{}, - "/_all/_settings", - }, - { - []string{}, - []string{"index.merge.*"}, - "/_all/_settings/index.merge.%2A", - }, - { - []string{"twitter-*"}, - []string{"index.merge.*", "_settings"}, - "/twitter-%2A/_settings/index.merge.%2A%2C_settings", - }, - { - []string{"store-1", "store-2"}, - []string{"index.merge.*", "_settings"}, - "/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings", - }, - } - - for _, test := range tests { - path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} - -func TestIndexGetSettingsService(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if esversion < "1.4.0" { - t.Skip("Index Get API is available since 1.4") - return - } - - res, err := client.IndexGetSettings().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatalf("expected result; got: %v", res) - } - info, found := res[testIndexName] - if !found { - t.Fatalf("expected index %q to be found; got: %v", testIndexName, found) - } - if info == nil { - t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info) - } - if info.Settings == nil { - t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template.go deleted file mode 100644 index b0e66d3f9..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesGetTemplateService returns an index template. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. -type IndicesGetTemplateService struct { - client *Client - pretty bool - name []string - flatSettings *bool - local *bool -} - -// NewIndicesGetTemplateService creates a new IndicesGetTemplateService. -func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService { - return &IndicesGetTemplateService{ - client: client, - name: make([]string, 0), - } -} - -// Name is the name of the index template. -func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService { - s.name = append(s.name, name...) - return s -} - -// FlatSettings is returns settings in flat format (default: false). -func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService { - s.flatSettings = &flatSettings - return s -} - -// Local indicates whether to return local information, i.e. do not retrieve -// the state from master node (default: false). 
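The IndicesGetSettingsService deleted above supports narrowing the response to named settings, as the removed URL test does with patterns like "index.merge.*". A sketch under the same placeholder assumptions:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Restrict the response to merge-related settings of the hypothetical
	// "twitter" index; omit Name() to get all settings.
	res, err := client.IndexGetSettings().Index("twitter").Name("index.merge.*").Do()
	if err != nil {
		log.Fatal(err)
	}
	if info, found := res["twitter"]; found {
		fmt.Printf("settings: %+v\n", info.Settings)
	}
}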
-func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService { - s.local = &local - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.name) > 0 { - path, err = uritemplates.Expand("/_template/{name}", map[string]string{ - "name": strings.Join(s.name, ","), - }) - } else { - path = "/_template" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesGetTemplateService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - var ret map[string]*IndicesGetTemplateResponse - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do. -type IndicesGetTemplateResponse struct { - Order int `json:"order,omitempty"` - Template string `json:"template,omitempty"` - Settings map[string]interface{} `json:"settings,omitempty"` - Mappings map[string]interface{} `json:"mappings,omitempty"` - Aliases map[string]interface{} `json:"aliases,omitempty"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template_test.go deleted file mode 100644 index 693cde5ea..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_template_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
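The IndicesGetTemplateService deleted above lists index templates; with no name it hits /_template and returns every template keyed by name. A sketch, placeholder assumptions as before:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// No Name() given: fetch all index templates on the cluster.
	templates, err := client.IndexGetTemplate().Do()
	if err != nil {
		log.Fatal(err)
	}
	for name, tmpl := range templates {
		fmt.Printf("template %s matches %q (order %d)\n", name, tmpl.Template, tmpl.Order)
	}
}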
- -package elastic - -import ( - "testing" -) - -func TestIndexGetTemplateURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Names []string - Expected string - }{ - { - []string{}, - "/_template", - }, - { - []string{"index1"}, - "/_template/index1", - }, - { - []string{"index1", "index2"}, - "/_template/index1%2Cindex2", - }, - } - - for _, test := range tests { - path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_test.go deleted file mode 100644 index fcdee54db..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -func TestIndicesGetValidate(t *testing.T) { - client := setupTestClient(t) - - // No index name -> fail with error - res, err := NewIndicesGetService(client).Index("").Do() - if err == nil { - t.Fatalf("expected IndicesGet to fail without index name") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } -} - -func TestIndicesGetURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Features []string - Expected string - }{ - { - []string{}, - []string{}, - "/_all", - }, - { - []string{}, - []string{"_mappings"}, - "/_all/_mappings", - }, - { - []string{"twitter"}, - []string{"_mappings", "_settings"}, - "/twitter/_mappings%2C_settings", - }, - { - []string{"store-1", "store-2"}, - []string{"_mappings", "_settings"}, - "/store-1%2Cstore-2/_mappings%2C_settings", - }, - } - - for _, test := range tests { - path, _, err := NewIndicesGetService(client).Index(test.Indices...).Feature(test.Features...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} - -func TestIndicesGetService(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if esversion < "1.4.0" { - t.Skip("Index Get API is available since 1.4") - return - } - - res, err := client.IndexGet().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatalf("expected result; got: %v", res) - } - info, found := res[testIndexName] - if !found { - t.Fatalf("expected index %q to be found; got: %v", testIndexName, found) - } - if info == nil { - t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info) - } - if info.Mappings == nil { - t.Errorf("expected mappings to be != nil; got: %v", info.Mappings) - } - if info.Settings == nil { - t.Errorf("expected settings to be != nil; got: %v", info.Settings) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer.go deleted file mode 100644 index 29bc6cbfd..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 
2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesGetWarmerService allows to get the definition of a warmer for a -// specific index (or alias, or several indices) based on its name. -// The provided name can be a simple wildcard expression or omitted to get -// all warmers. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html -// for more information. -type IndicesGetWarmerService struct { - client *Client - pretty bool - index []string - name []string - typ []string - allowNoIndices *bool - expandWildcards string - ignoreUnavailable *bool - local *bool -} - -// NewIndicesGetWarmerService creates a new IndicesGetWarmerService. -func NewIndicesGetWarmerService(client *Client) *IndicesGetWarmerService { - return &IndicesGetWarmerService{ - client: client, - typ: make([]string, 0), - index: make([]string, 0), - name: make([]string, 0), - } -} - -// Index is a list of index names to restrict the operation; use `_all` to perform the operation on all indices. -func (s *IndicesGetWarmerService) Index(indices ...string) *IndicesGetWarmerService { - s.index = append(s.index, indices...) - return s -} - -// Name is the name of the warmer (supports wildcards); leave empty to get all warmers. -func (s *IndicesGetWarmerService) Name(name ...string) *IndicesGetWarmerService { - s.name = append(s.name, name...) - return s -} - -// Type is a list of type names the mapping should be added to -// (supports wildcards); use `_all` or omit to add the mapping on all types. -func (s *IndicesGetWarmerService) Type(typ ...string) *IndicesGetWarmerService { - s.typ = append(s.typ, typ...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// This includes `_all` string or when no indices have been specified. -func (s *IndicesGetWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesGetWarmerService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesGetWarmerService) ExpandWildcards(expandWildcards string) *IndicesGetWarmerService { - s.expandWildcards = expandWildcards - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesGetWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetWarmerService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Local indicates wether or not to return local information, -// do not retrieve the state from master node (default: false). -func (s *IndicesGetWarmerService) Local(local bool) *IndicesGetWarmerService { - s.local = &local - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesGetWarmerService) Pretty(pretty bool) *IndicesGetWarmerService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndicesGetWarmerService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) == 0 { - path = "/_warmer" - } else if len(s.index) == 0 && len(s.typ) == 0 && len(s.name) > 0 { - path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ - "name": strings.Join(s.name, ","), - }) - } else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) == 0 { - path, err = uritemplates.Expand("/_all/{type}/_warmer", map[string]string{ - "type": strings.Join(s.typ, ","), - }) - } else if len(s.index) == 0 && len(s.typ) > 0 && len(s.name) > 0 { - path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ - "type": strings.Join(s.typ, ","), - "name": strings.Join(s.name, ","), - }) - } else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) == 0 { - path, err = uritemplates.Expand("/{index}/_warmer", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else if len(s.index) > 0 && len(s.typ) == 0 && len(s.name) > 0 { - path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ - "index": strings.Join(s.index, ","), - "name": strings.Join(s.name, ","), - }) - } else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) == 0 { - path, err = uritemplates.Expand("/{index}/{type}/_warmer", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - } else if len(s.index) > 0 && len(s.typ) > 0 && len(s.name) > 0 { - path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - "name": strings.Join(s.name, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesGetWarmerService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesGetWarmerService) Do() (map[string]interface{}, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - var ret map[string]interface{} - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer_test.go deleted file mode 100644 index ea01a628e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_get_warmer_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
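The warmer lookup deleted above builds one of eight URL shapes depending on which of index, type, and name are set (the test below covers them all), and its Do() returns the raw warmer document as an untyped map. A sketch, with the same placeholder client and names:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Index and warmer name are hypothetical; leaving both empty would
	// query /_warmer for all warmers.
	warmers, err := client.GetWarmer().Index("twitter").Name("warmer_1").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("warmers: %+v\n", warmers)
}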
- -package elastic - -import "testing" - -func TestGetWarmerBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Types []string - Names []string - Expected string - }{ - { - []string{}, - []string{}, - []string{}, - "/_warmer", - }, - { - []string{}, - []string{}, - []string{"warmer_1"}, - "/_warmer/warmer_1", - }, - { - []string{}, - []string{"tweet"}, - []string{}, - "/_all/tweet/_warmer", - }, - { - []string{}, - []string{"tweet"}, - []string{"warmer_1"}, - "/_all/tweet/_warmer/warmer_1", - }, - { - []string{"test"}, - []string{}, - []string{}, - "/test/_warmer", - }, - { - []string{"test"}, - []string{}, - []string{"warmer_1"}, - "/test/_warmer/warmer_1", - }, - { - []string{"*"}, - []string{}, - []string{"warmer_1"}, - "/%2A/_warmer/warmer_1", - }, - { - []string{"test"}, - []string{"tweet"}, - []string{"warmer_1"}, - "/test/tweet/_warmer/warmer_1", - }, - { - []string{"index-1", "index-2"}, - []string{"type-1", "type-2"}, - []string{"warmer_1", "warmer_2"}, - "/index-1%2Cindex-2/type-1%2Ctype-2/_warmer/warmer_1%2Cwarmer_2", - }, - } - - for _, test := range tests { - path, _, err := client.GetWarmer().Index(test.Indices...).Type(test.Types...).Name(test.Names...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_open.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_open.go deleted file mode 100644 index 85a45bb1d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_open.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesOpenService opens an index. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html -// for details. -type IndicesOpenService struct { - client *Client - pretty bool - index string - timeout string - masterTimeout string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string -} - -// NewIndicesOpenService creates and initializes a new IndicesOpenService. -func NewIndicesOpenService(client *Client) *IndicesOpenService { - return &IndicesOpenService{client: client} -} - -// Index is the name of the index to open. -func (s *IndicesOpenService) Index(index string) *IndicesOpenService { - s.index = index - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService { - s.masterTimeout = masterTimeout - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should -// be ignored when unavailable (missing or closed). -func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. 
-// (This includes `_all` string or when no indices have been specified). -func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService { - s.expandWildcards = expandWildcards - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesOpenService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/_open", map[string]string{ - "index": s.index, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesOpenService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesOpenService) Do() (*IndicesOpenResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("POST", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesOpenResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesOpenResponse is the response of IndicesOpenService.Do. -type IndicesOpenResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_open_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_open_test.go deleted file mode 100644 index 352bb479b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_open_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
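The IndicesOpenService deleted above is the one service in this batch whose Validate() enforces a non-empty index name (the test below checks exactly that failure). A sketch via the exported constructor, placeholder assumptions as before:

package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Index is required; Timeout bounds the open operation itself.
	res, err := elastic.NewIndicesOpenService(client).Index("twitter").Timeout("30s").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("open acknowledged:", res.Acknowledged)
}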
- -package elastic - -import "testing" - -func TestIndicesOpenValidate(t *testing.T) { - client := setupTestClient(t) - - // No index name -> fail with error - res, err := NewIndicesOpenService(client).Do() - if err == nil { - t.Fatalf("expected IndicesOpen to fail without index name") - } - if res != nil { - t.Fatalf("expected result to be == nil; got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias.go deleted file mode 100644 index d8515036b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" -) - -type AliasService struct { - client *Client - actions []aliasAction - pretty bool -} - -type aliasAction struct { - // "add" or "remove" - Type string - // Index name - Index string - // Alias name - Alias string - // Filter - Filter Query -} - -func NewAliasService(client *Client) *AliasService { - builder := &AliasService{ - client: client, - actions: make([]aliasAction, 0), - } - return builder -} - -func (s *AliasService) Pretty(pretty bool) *AliasService { - s.pretty = pretty - return s -} - -func (s *AliasService) Add(indexName string, aliasName string) *AliasService { - action := aliasAction{Type: "add", Index: indexName, Alias: aliasName} - s.actions = append(s.actions, action) - return s -} - -func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService { - action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter} - s.actions = append(s.actions, action) - return s -} - -func (s *AliasService) Remove(indexName string, aliasName string) *AliasService { - action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName} - s.actions = append(s.actions, action) - return s -} - -func (s *AliasService) Do() (*AliasResult, error) { - // Build url - path := "/_aliases" - - // Parameters - params := make(url.Values) - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - - // Actions - body := make(map[string]interface{}) - actionsJson := make([]interface{}, 0) - - for _, action := range s.actions { - actionJson := make(map[string]interface{}) - detailsJson := make(map[string]interface{}) - detailsJson["index"] = action.Index - detailsJson["alias"] = action.Alias - if action.Filter != nil { - src, err := action.Filter.Source() - if err != nil { - return nil, err - } - detailsJson["filter"] = src - } - actionJson[action.Type] = detailsJson - actionsJson = append(actionsJson, actionJson) - } - - body["actions"] = actionsJson - - // Get response - res, err := s.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return results - ret := new(AliasResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of an alias request. 
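-
-// For reference, Do above posts the queued actions as the standard
-// _aliases request body, e.g. (illustrative index/alias names):
-//
-//     {"actions":[
-//       {"add":    {"index":"index-1","alias":"alias-1"}},
-//       {"remove": {"index":"index-2","alias":"alias-1"}}
-//     ]}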
- -type AliasResult struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias_test.go deleted file mode 100644 index 3e4e797b0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_alias_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" -) - -const ( - testAliasName = "elastic-test-alias" -) - -func TestAliasLifecycle(t *testing.T) { - var err error - - client := setupTestClientAndCreateIndex(t) - - // Some tweets - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."} - tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."} - - // Add tweets to first index - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - // Add tweets to second index - _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - // Flush - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Flush().Index(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - - /* - // Alias should not yet exist - aliasesResult1, err := client.Aliases().Do() - if err != nil { - t.Fatal(err) - } - if len(aliasesResult1.Indices) != 0 { - t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 0, len(aliasesResult1.Indices)) - } - */ - - // Add both indices to a new alias - aliasCreate, err := client.Alias(). - Add(testIndexName, testAliasName). - Add(testIndexName2, testAliasName). - //Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if !aliasCreate.Acknowledged { - t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged) - } - - // Search should return all 3 tweets - matchAll := NewMatchAllQuery() - searchResult1, err := client.Search().Index(testAliasName).Query(matchAll).Do() - if err != nil { - t.Fatal(err) - } - if searchResult1.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult1.Hits.TotalHits != 3 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits) - } - - /* - // Alias should return both indices - aliasesResult2, err := client.Aliases().Do() - if err != nil { - t.Fatal(err) - } - if len(aliasesResult2.Indices) != 2 { - t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices)) - } - */ - - // Remove first index should remove two tweets, so should only yield 1 - aliasRemove1, err := client.Alias(). - Remove(testIndexName, testAliasName). - //Pretty(true). 
- Do() - if err != nil { - t.Fatal(err) - } - if !aliasRemove1.Acknowledged { - t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged) - } - - searchResult2, err := client.Search().Index(testAliasName).Query(matchAll).Do() - if err != nil { - t.Fatal(err) - } - if searchResult2.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult2.Hits.TotalHits != 1 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits) - } - -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping.go deleted file mode 100644 index 5a23165b0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesPutMappingService allows to register specific mapping definition -// for a specific type. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html -// for details. -type IndicesPutMappingService struct { - client *Client - pretty bool - typ string - index []string - masterTimeout string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - ignoreConflicts *bool - timeout string - bodyJson map[string]interface{} - bodyString string -} - -// NewPutMappingService is an alias for NewIndicesPutMappingService. -// Use NewIndicesPutMappingService. -func NewPutMappingService(client *Client) *IndicesPutMappingService { - return NewIndicesPutMappingService(client) -} - -// NewIndicesPutMappingService creates a new IndicesPutMappingService. -func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService { - return &IndicesPutMappingService{ - client: client, - index: make([]string, 0), - } -} - -// Index is a list of index names the mapping should be added to -// (supports wildcards); use `_all` or omit to add the mapping on all indices. -func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService { - s.index = append(s.index, indices...) - return s -} - -// Type is the name of the document type. -func (s *IndicesPutMappingService) Type(typ string) *IndicesPutMappingService { - s.typ = typ - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService { - s.masterTimeout = masterTimeout - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// This includes `_all` string or when no indices have been specified. 
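-// For example, with AllowNoIndices(true) a request against a wildcard
-// such as "logs-*" that matches nothing succeeds instead of erroring.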
-func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService { - s.expandWildcards = expandWildcards - return s -} - -// IgnoreConflicts specifies whether to ignore conflicts while updating -// the mapping (default: false). -func (s *IndicesPutMappingService) IgnoreConflicts(ignoreConflicts bool) *IndicesPutMappingService { - s.ignoreConflicts = &ignoreConflicts - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService { - s.pretty = pretty - return s -} - -// BodyJson contains the mapping definition. -func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService { - s.bodyJson = mapping - return s -} - -// BodyString is the mapping definition serialized as a string. -func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService { - s.bodyString = mapping - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) { - var err error - var path string - - // Build URL: Typ MUST be specified and is verified in Validate. - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ - "index": strings.Join(s.index, ","), - "type": s.typ, - }) - } else { - path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{ - "type": s.typ, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreConflicts != nil { - params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts)) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesPutMappingService) Validate() error { - var invalid []string - if s.typ == "" { - invalid = append(invalid, "Type") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
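-//
-// A sketch of a typical call, mirroring TestMappingLifecycle in this
-// package (index, type and field names are illustrative):
-//
-//     resp, err := client.PutMapping().
-//         Index("twitter").
-//         Type("tweet").
-//         BodyString(`{"tweet":{"properties":{"message":{"type":"string"}}}}`).
-//         Do()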
-func (s *IndicesPutMappingService) Do() (*PutMappingResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest("PUT", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(PutMappingResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// PutMappingResponse is the response of IndicesPutMappingService.Do. -type PutMappingResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping_test.go deleted file mode 100644 index 356aa2728..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_mapping_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestPutMappingURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Type string - Expected string - }{ - { - []string{}, - "tweet", - "/_mapping/tweet", - }, - { - []string{"*"}, - "tweet", - "/%2A/_mapping/tweet", - }, - { - []string{"store-1", "store-2"}, - "tweet", - "/store-1%2Cstore-2/_mapping/tweet", - }, - } - - for _, test := range tests { - path, _, err := client.PutMapping().Index(test.Indices...).Type(test.Type).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} - -func TestMappingLifecycle(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - mapping := `{ - "tweetdoc":{ - "properties":{ - "message":{ - "type":"string" - } - } - } - }` - - putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do() - if err != nil { - t.Fatalf("expected put mapping to succeed; got: %v", err) - } - if putresp == nil { - t.Fatalf("expected put mapping response; got: %v", putresp) - } - if !putresp.Acknowledged { - t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged) - } - - getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do() - if err != nil { - t.Fatalf("expected get mapping to succeed; got: %v", err) - } - if getresp == nil { - t.Fatalf("expected get mapping response; got: %v", getresp) - } - props, ok := getresp[testIndexName2] - if !ok { - t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props) - } - - // NOTE There is no Delete Mapping API in Elasticsearch 2.0 -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings.go deleted file mode 100644 index 4cdd3e1cb..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. 
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesPutSettingsService changes specific index level settings in -// real time. -// -// See the documentation at -// https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html. -type IndicesPutSettingsService struct { - client *Client - pretty bool - index []string - allowNoIndices *bool - expandWildcards string - flatSettings *bool - ignoreUnavailable *bool - masterTimeout string - bodyJson interface{} - bodyString string -} - -// NewIndicesPutSettingsService creates a new IndicesPutSettingsService. -func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService { - return &IndicesPutSettingsService{ - client: client, - index: make([]string, 0), - } -} - -// Index is a list of index names the mapping should be added to -// (supports wildcards); use `_all` or omit to add the mapping on all indices. -func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService { - s.index = append(s.index, indices...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes `_all` -// string or when no indices have been specified). -func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards specifies whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService { - s.expandWildcards = expandWildcards - return s -} - -// FlatSettings indicates whether to return settings in flat format (default: false). -func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService { - s.flatSettings = &flatSettings - return s -} - -// IgnoreUnavailable specifies whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// MasterTimeout is the timeout for connection to master. -func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService { - s.masterTimeout = masterTimeout - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService { - s.pretty = pretty - return s -} - -// BodyJson is documented as: The index settings to be updated. -func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService { - s.bodyJson = body - return s -} - -// BodyString is documented as: The index settings to be updated. -func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
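-// With no index the path falls back to "/_settings"; with indices it
-// expands to e.g. "/store-1%2Cstore-2/_settings" (commas URL-escaped).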
-func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_settings", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_settings" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesPutSettingsService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesPutSettingsService) Do() (*IndicesPutSettingsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest("PUT", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesPutSettingsResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do. -type IndicesPutSettingsResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings_test.go deleted file mode 100644 index 4bc86e18e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_settings_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import "testing" - -func TestIndicesPutSettingsBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Expected string - }{ - { - []string{}, - "/_settings", - }, - { - []string{"*"}, - "/%2A/_settings", - }, - { - []string{"store-1", "store-2"}, - "/store-1%2Cstore-2/_settings", - }, - } - - for _, test := range tests { - path, _, err := client.IndexPutSettings().Index(test.Indices...).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} - -func TestIndicesSettingsLifecycle(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - body := `{ - "index":{ - "refresh_interval":"-1" - } - }` - - // Put settings - putres, err := client.IndexPutSettings().Index(testIndexName).BodyString(body).Do() - if err != nil { - t.Fatalf("expected put settings to succeed; got: %v", err) - } - if putres == nil { - t.Fatalf("expected put settings response; got: %v", putres) - } - if !putres.Acknowledged { - t.Fatalf("expected put settings ack; got: %v", putres.Acknowledged) - } - - // Read settings - getres, err := client.IndexGetSettings().Index(testIndexName).Do() - if err != nil { - t.Fatalf("expected get mapping to succeed; got: %v", err) - } - if getres == nil { - t.Fatalf("expected get mapping response; got: %v", getres) - } - - // Check settings - index, found := getres[testIndexName] - if !found { - t.Fatalf("expected to return settings for index %q; got: %#v", testIndexName, getres) - } - // Retrieve "index" section of the settings for index testIndexName - sectionIntf, ok := index.Settings["index"] - if !ok { - t.Fatalf("expected settings to have %q field; got: %#v", "index", getres) - } - section, ok := sectionIntf.(map[string]interface{}) - if !ok { - t.Fatalf("expected settings to be of type map[string]interface{}; got: %#v", getres) - } - refintv, ok := section["refresh_interval"] - if !ok { - t.Fatalf(`expected JSON to include "refresh_interval" field; got: %#v`, getres) - } - if got, want := refintv, "-1"; got != want { - t.Fatalf("expected refresh_interval = %v; got: %v", want, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_template.go deleted file mode 100644 index 72947f311..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_template.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesPutTemplateService creates or updates index mappings. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html. -type IndicesPutTemplateService struct { - client *Client - pretty bool - name string - order interface{} - create *bool - timeout string - masterTimeout string - flatSettings *bool - bodyJson interface{} - bodyString string -} - -// NewIndicesPutTemplateService creates a new IndicesPutTemplateService. -func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService { - return &IndicesPutTemplateService{ - client: client, - } -} - -// Name is the name of the index template. 
-func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService { - s.name = name - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService { - s.masterTimeout = masterTimeout - return s -} - -// FlatSettings indicates whether to return settings in flat format (default: false). -func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService { - s.flatSettings = &flatSettings - return s -} - -// Order is the order for this template when merging multiple matching ones -// (higher numbers are merged later, overriding the lower numbers). -func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService { - s.order = order - return s -} - -// Create indicates whether the index template should only be added if -// new or can also replace an existing one. -func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService { - s.create = &create - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService { - s.pretty = pretty - return s -} - -// BodyJson is documented as: The template definition. -func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService { - s.bodyJson = body - return s -} - -// BodyString is documented as: The template definition. -func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_template/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.order != nil { - params.Set("order", fmt.Sprintf("%v", s.order)) - } - if s.create != nil { - params.Set("create", fmt.Sprintf("%v", *s.create)) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesPutTemplateService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
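-//
-// A minimal sketch (template name and body are illustrative; the body
-// follows the usual index-template format):
-//
-//     resp, err := NewIndicesPutTemplateService(client).
-//         Name("template_1").
-//         BodyString(`{"template":"logs-*","settings":{"number_of_shards":1}}`).
-//         Do()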
-func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest("PUT", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesPutTemplateResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do. -type IndicesPutTemplateResponse struct { - Acknowledged bool `json:"acknowledged,omitempty"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer.go deleted file mode 100644 index 6e1f3ae66..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesPutWarmerService allows to register a warmer. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-warmers.html. -type IndicesPutWarmerService struct { - client *Client - pretty bool - typ []string - index []string - name string - masterTimeout string - ignoreUnavailable *bool - allowNoIndices *bool - requestCache *bool - expandWildcards string - bodyJson map[string]interface{} - bodyString string -} - -// NewIndicesPutWarmerService creates a new IndicesPutWarmerService. -func NewIndicesPutWarmerService(client *Client) *IndicesPutWarmerService { - return &IndicesPutWarmerService{ - client: client, - index: make([]string, 0), - typ: make([]string, 0), - } -} - -// Index is a list of index names the mapping should be added to -// (supports wildcards); use `_all` or omit to add the mapping on all indices. -func (s *IndicesPutWarmerService) Index(indices ...string) *IndicesPutWarmerService { - s.index = append(s.index, indices...) - return s -} - -// Type is a list of type names the mapping should be added to -// (supports wildcards); use `_all` or omit to add the mapping on all types. -func (s *IndicesPutWarmerService) Type(typ ...string) *IndicesPutWarmerService { - s.typ = append(s.typ, typ...) - return s -} - -// Name specifies the name of the warmer (supports wildcards); -// leave empty to get all warmers -func (s *IndicesPutWarmerService) Name(name string) *IndicesPutWarmerService { - s.name = name - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesPutWarmerService) MasterTimeout(masterTimeout string) *IndicesPutWarmerService { - s.masterTimeout = masterTimeout - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). 
-func (s *IndicesPutWarmerService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutWarmerService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// This includes `_all` string or when no indices have been specified. -func (s *IndicesPutWarmerService) AllowNoIndices(allowNoIndices bool) *IndicesPutWarmerService { - s.allowNoIndices = &allowNoIndices - return s -} - -// RequestCache specifies whether the request to be warmed should use the request cache, -// defaults to index level setting -func (s *IndicesPutWarmerService) RequestCache(requestCache bool) *IndicesPutWarmerService { - s.requestCache = &requestCache - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesPutWarmerService) ExpandWildcards(expandWildcards string) *IndicesPutWarmerService { - s.expandWildcards = expandWildcards - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesPutWarmerService) Pretty(pretty bool) *IndicesPutWarmerService { - s.pretty = pretty - return s -} - -// BodyJson contains the mapping definition. -func (s *IndicesPutWarmerService) BodyJson(mapping map[string]interface{}) *IndicesPutWarmerService { - s.bodyJson = mapping - return s -} - -// BodyString is the mapping definition serialized as a string. -func (s *IndicesPutWarmerService) BodyString(mapping string) *IndicesPutWarmerService { - s.bodyString = mapping - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesPutWarmerService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) == 0 && len(s.typ) == 0 { - path, err = uritemplates.Expand("/_warmer/{name}", map[string]string{ - "name": s.name, - }) - } else if len(s.index) == 0 && len(s.typ) > 0 { - path, err = uritemplates.Expand("/_all/{type}/_warmer/{name}", map[string]string{ - "type": strings.Join(s.typ, ","), - "name": s.name, - }) - } else if len(s.index) > 0 && len(s.typ) == 0 { - path, err = uritemplates.Expand("/{index}/_warmer/{name}", map[string]string{ - "index": strings.Join(s.index, ","), - "name": s.name, - }) - } else { - path, err = uritemplates.Expand("/{index}/{type}/_warmer/{name}", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - "name": s.name, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.requestCache != nil { - params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. 
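-// As enforced below, a warmer name and a body (BodyJson or BodyString)
-// are required; the index and type lists are optional, and buildURL
-// falls back to "/_warmer/{name}" when both are empty.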
-func (s *IndicesPutWarmerService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesPutWarmerService) Do() (*PutWarmerResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest("PUT", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(PutWarmerResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// PutWarmerResponse is the response of IndicesPutWarmerService.Do. -type PutWarmerResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer_test.go deleted file mode 100644 index 25a1f3ecb..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_put_warmer_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestPutWarmerBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Indices []string - Types []string - Name string - Expected string - }{ - { - []string{}, - []string{}, - "warmer_1", - "/_warmer/warmer_1", - }, - { - []string{"*"}, - []string{}, - "warmer_1", - "/%2A/_warmer/warmer_1", - }, - { - []string{}, - []string{"*"}, - "warmer_1", - "/_all/%2A/_warmer/warmer_1", - }, - { - []string{"index-1", "index-2"}, - []string{"type-1", "type-2"}, - "warmer_1", - "/index-1%2Cindex-2/type-1%2Ctype-2/_warmer/warmer_1", - }, - } - - for _, test := range tests { - path, _, err := client.PutWarmer().Index(test.Indices...).Type(test.Types...).Name(test.Name).buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} - -func TestWarmerLifecycle(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - mapping := `{ - "query": { - "match_all": {} - } - }` - - // Ensure well prepared test index - client.Flush(testIndexName2).Do() - - putresp, err := client.PutWarmer().Index(testIndexName2).Type("tweet").Name("warmer_1").BodyString(mapping).Do() - if err != nil { - t.Fatalf("expected put warmer to succeed; got: %v", err) - } - if putresp == nil { - t.Fatalf("expected put warmer response; got: %v", putresp) - } - if !putresp.Acknowledged { - t.Fatalf("expected put warmer ack; got: %v", putresp.Acknowledged) - } - - getresp, err := client.GetWarmer().Index(testIndexName2).Name("warmer_1").Do() - if err != nil { - t.Fatalf("expected get warmer to succeed; got: %v", err) - } - if getresp == nil { - t.Fatalf("expected get warmer response; got: %v", getresp) - } - props, ok := getresp[testIndexName2] - if !ok { - 
t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props) - } - - delresp, err := client.DeleteWarmer().Index(testIndexName2).Name("warmer_1").Do() - if err != nil { - t.Fatalf("expected del warmer to succeed; got: %v", err) - } - if delresp == nil { - t.Fatalf("expected del warmer response; got: %v", getresp) - } - if !delresp.Acknowledged { - t.Fatalf("expected del warmer ack; got: %v", delresp.Acknowledged) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh.go deleted file mode 100644 index 392a8d393..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -type RefreshService struct { - client *Client - indices []string - force *bool - pretty bool -} - -func NewRefreshService(client *Client) *RefreshService { - builder := &RefreshService{ - client: client, - indices: make([]string, 0), - } - return builder -} - -func (s *RefreshService) Index(indices ...string) *RefreshService { - s.indices = append(s.indices, indices...) - return s -} - -func (s *RefreshService) Force(force bool) *RefreshService { - s.force = &force - return s -} - -func (s *RefreshService) Pretty(pretty bool) *RefreshService { - s.pretty = pretty - return s -} - -func (s *RefreshService) Do() (*RefreshResult, error) { - // Build url - path := "/" - - // Indices part - indexPart := make([]string, 0) - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - if len(indexPart) > 0 { - path += strings.Join(indexPart, ",") - } - - path += "/_refresh" - - // Parameters - params := make(url.Values) - if s.force != nil { - params.Set("force", fmt.Sprintf("%v", *s.force)) - } - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - - // Get response - res, err := s.client.PerformRequest("POST", path, params, nil) - if err != nil { - return nil, err - } - - // Return result - ret := new(RefreshResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a refresh request. - -type RefreshResult struct { - Shards shardsInfo `json:"_shards,omitempty"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh_test.go deleted file mode 100644 index 885e63365..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_refresh_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "testing" -) - -func TestRefresh(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add some documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Refresh indices - res, err := client.Refresh(testIndexName, testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatal("expected result; got nil") - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats.go deleted file mode 100644 index b9255c094..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats.go +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// IndicesStatsService provides stats on various metrics of one or more -// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html. -type IndicesStatsService struct { - client *Client - pretty bool - metric []string - index []string - level string - types []string - completionFields []string - fielddataFields []string - fields []string - groups []string - human *bool -} - -// NewIndicesStatsService creates a new IndicesStatsService. -func NewIndicesStatsService(client *Client) *IndicesStatsService { - return &IndicesStatsService{ - client: client, - index: make([]string, 0), - metric: make([]string, 0), - completionFields: make([]string, 0), - fielddataFields: make([]string, 0), - fields: make([]string, 0), - groups: make([]string, 0), - types: make([]string, 0), - } -} - -// Metric limits the information returned the specific metrics. Options are: -// docs, store, indexing, get, search, completion, fielddata, flush, merge, -// query_cache, refresh, suggest, and warmer. -func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService { - s.metric = append(s.metric, metric...) - return s -} - -// Index is the list of index names; use `_all` or empty string to perform -// the operation on all indices. -func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService { - s.index = append(s.index, indices...) - return s -} - -// Type is a list of document types for the `indexing` index metric. -func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService { - s.types = append(s.types, types...) - return s -} - -// Level returns stats aggregated at cluster, index or shard level. 
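-// The accepted values here should be "cluster", "indices" and
-// "shards", per the level parameter of the indices stats API.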
-func (s *IndicesStatsService) Level(level string) *IndicesStatsService { - s.level = level - return s -} - -// CompletionFields is a list of fields for `fielddata` and `suggest` -// index metric (supports wildcards). -func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService { - s.completionFields = append(s.completionFields, completionFields...) - return s -} - -// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards). -func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService { - s.fielddataFields = append(s.fielddataFields, fielddataFields...) - return s -} - -// Fields is a list of fields for `fielddata` and `completion` index metric -// (supports wildcards). -func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService { - s.fields = append(s.fields, fields...) - return s -} - -// Groups is a list of search groups for `search` index metric. -func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService { - s.groups = append(s.groups, groups...) - return s -} - -// Human indicates whether to return time and byte values in human-readable format.. -func (s *IndicesStatsService) Human(human bool) *IndicesStatsService { - s.human = &human - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService { - s.pretty = pretty - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesStatsService) buildURL() (string, url.Values, error) { - var err error - var path string - if len(s.index) > 0 && len(s.metric) > 0 { - path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{ - "index": strings.Join(s.index, ","), - "metric": strings.Join(s.metric, ","), - }) - } else if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_stats", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else if len(s.metric) > 0 { - path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{ - "metric": strings.Join(s.metric, ","), - }) - } else { - path = "/_stats" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if len(s.groups) > 0 { - params.Set("groups", strings.Join(s.groups, ",")) - } - if s.human != nil { - params.Set("human", fmt.Sprintf("%v", *s.human)) - } - if s.level != "" { - params.Set("level", s.level) - } - if len(s.types) > 0 { - params.Set("types", strings.Join(s.types, ",")) - } - if len(s.completionFields) > 0 { - params.Set("completion_fields", strings.Join(s.completionFields, ",")) - } - if len(s.fielddataFields) > 0 { - params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesStatsService) Validate() error { - return nil -} - -// Do executes the operation. 
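-//
-// A usage sketch (index and metric names are illustrative):
-//
-//     stats, err := client.IndexStats("twitter").Metric("docs", "store").Do()
-//     if err == nil {
-//         _ = stats.Indices["twitter"].Total.Docs.Count
-//     }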
-func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, nil) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesStatsResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesStatsResponse is the response of IndicesStatsService.Do. -type IndicesStatsResponse struct { - // Shards provides information returned from shards. - Shards shardsInfo `json:"_shards"` - - // All provides summary stats about all indices. - All *IndexStats `json:"_all,omitempty"` - - // Indices provides a map into the stats of an index. The key of the - // map is the index name. - Indices map[string]*IndexStats `json:"indices,omitempty"` -} - -// IndexStats is index stats for a specific index. -type IndexStats struct { - Primaries *IndexStatsDetails `json:"primaries,omitempty"` - Total *IndexStatsDetails `json:"total,omitempty"` -} - -type IndexStatsDetails struct { - Docs *IndexStatsDocs `json:"docs,omitempty"` - Store *IndexStatsStore `json:"store,omitempty"` - Indexing *IndexStatsIndexing `json:"indexing,omitempty"` - Get *IndexStatsGet `json:"get,omitempty"` - Search *IndexStatsSearch `json:"search,omitempty"` - Merges *IndexStatsMerges `json:"merges,omitempty"` - Refresh *IndexStatsRefresh `json:"refresh,omitempty"` - Flush *IndexStatsFlush `json:"flush,omitempty"` - Warmer *IndexStatsWarmer `json:"warmer,omitempty"` - FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"` - IdCache *IndexStatsIdCache `json:"id_cache,omitempty"` - Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"` - Percolate *IndexStatsPercolate `json:"percolate,omitempty"` - Completion *IndexStatsCompletion `json:"completion,omitempty"` - Segments *IndexStatsSegments `json:"segments,omitempty"` - Translog *IndexStatsTranslog `json:"translog,omitempty"` - Suggest *IndexStatsSuggest `json:"suggest,omitempty"` - QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"` -} - -type IndexStatsDocs struct { - Count int64 `json:"count,omitempty"` - Deleted int64 `json:"deleted,omitempty"` -} - -type IndexStatsStore struct { - Size string `json:"size,omitempty"` // human size, e.g. 119.3mb - SizeInBytes int64 `json:"size_in_bytes,omitempty"` - ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 
0s - ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` -} - -type IndexStatsIndexing struct { - IndexTotal int64 `json:"index_total,omitempty"` - IndexTime string `json:"index_time,omitempty"` - IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"` - IndexCurrent int64 `json:"index_current,omitempty"` - DeleteTotal int64 `json:"delete_total,omitempty"` - DeleteTime string `json:"delete_time,omitempty"` - DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"` - DeleteCurrent int64 `json:"delete_current,omitempty"` - NoopUpdateTotal int64 `json:"noop_update_total,omitempty"` - IsThrottled bool `json:"is_throttled,omitempty"` - ThrottleTime string `json:"throttle_time,omitempty"` - ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` -} - -type IndexStatsGet struct { - Total int64 `json:"total,omitempty"` - GetTime string `json:"get_time,omitempty"` - TimeInMillis int64 `json:"time_in_millis,omitempty"` - ExistsTotal int64 `json:"exists_total,omitempty"` - ExistsTime string `json:"exists_time,omitempty"` - ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"` - MissingTotal int64 `json:"missing_total,omitempty"` - MissingTime string `json:"missing_time,omitempty"` - MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"` - Current int64 `json:"current,omitempty"` -} - -type IndexStatsSearch struct { - OpenContexts int64 `json:"open_contexts,omitempty"` - QueryTotal int64 `json:"query_total,omitempty"` - QueryTime string `json:"query_time,omitempty"` - QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"` - QueryCurrent int64 `json:"query_current,omitempty"` - FetchTotal int64 `json:"fetch_total,omitempty"` - FetchTime string `json:"fetch_time,omitempty"` - FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"` - FetchCurrent int64 `json:"fetch_current,omitempty"` -} - -type IndexStatsMerges struct { - Current int64 `json:"current,omitempty"` - CurrentDocs int64 `json:"current_docs,omitempty"` - CurrentSize string `json:"current_size,omitempty"` - CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"` - Total int64 `json:"total,omitempty"` - TotalTime string `json:"total_time,omitempty"` - TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` - TotalDocs int64 `json:"total_docs,omitempty"` - TotalSize string `json:"total_size,omitempty"` - TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"` -} - -type IndexStatsRefresh struct { - Total int64 `json:"total,omitempty"` - TotalTime string `json:"total_time,omitempty"` - TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` -} - -type IndexStatsFlush struct { - Total int64 `json:"total,omitempty"` - TotalTime string `json:"total_time,omitempty"` - TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` -} - -type IndexStatsWarmer struct { - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - TotalTime string `json:"total_time,omitempty"` - TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` -} - -type IndexStatsFilterCache struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` - Evictions int64 `json:"evictions,omitempty"` -} - -type IndexStatsIdCache struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` -} - -type IndexStatsFielddata struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 
`json:"memory_size_in_bytes,omitempty"` - Evictions int64 `json:"evictions,omitempty"` -} - -type IndexStatsPercolate struct { - Total int64 `json:"total,omitempty"` - GetTime string `json:"get_time,omitempty"` - TimeInMillis int64 `json:"time_in_millis,omitempty"` - Current int64 `json:"current,omitempty"` - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` - Queries int64 `json:"queries,omitempty"` -} - -type IndexStatsCompletion struct { - Size string `json:"size,omitempty"` - SizeInBytes int64 `json:"size_in_bytes,omitempty"` -} - -type IndexStatsSegments struct { - Count int64 `json:"count,omitempty"` - Memory string `json:"memory,omitempty"` - MemoryInBytes int64 `json:"memory_in_bytes,omitempty"` - IndexWriterMemory string `json:"index_writer_memory,omitempty"` - IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"` - IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"` - IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"` - VersionMapMemory string `json:"version_map_memory,omitempty"` - VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"` - FixedBitSetMemory string `json:"fixed_bit_set,omitempty"` - FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"` -} - -type IndexStatsTranslog struct { - Operations int64 `json:"operations,omitempty"` - Size string `json:"size,omitempty"` - SizeInBytes int64 `json:"size_in_bytes,omitempty"` -} - -type IndexStatsSuggest struct { - Total int64 `json:"total,omitempty"` - Time string `json:"time,omitempty"` - TimeInMillis int64 `json:"time_in_millis,omitempty"` - Current int64 `json:"current,omitempty"` -} - -type IndexStatsQueryCache struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` - Evictions int64 `json:"evictions,omitempty"` - HitCount int64 `json:"hit_count,omitempty"` - MissCount int64 `json:"miss_count,omitempty"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats_test.go deleted file mode 100644 index 2a72858d7..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/indices_stats_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
-
-package elastic
-
-import (
-	"testing"
-)
-
-func TestIndexStatsBuildURL(t *testing.T) {
-	client := setupTestClientAndCreateIndex(t)
-
-	tests := []struct {
-		Indices  []string
-		Metrics  []string
-		Expected string
-	}{
-		{
-			[]string{},
-			[]string{},
-			"/_stats",
-		},
-		{
-			[]string{"index1"},
-			[]string{},
-			"/index1/_stats",
-		},
-		{
-			[]string{},
-			[]string{"metric1"},
-			"/_stats/metric1",
-		},
-		{
-			[]string{"index1"},
-			[]string{"metric1"},
-			"/index1/_stats/metric1",
-		},
-		{
-			[]string{"index1", "index2"},
-			[]string{"metric1"},
-			"/index1%2Cindex2/_stats/metric1",
-		},
-		{
-			[]string{"index1", "index2"},
-			[]string{"metric1", "metric2"},
-			"/index1%2Cindex2/_stats/metric1%2Cmetric2",
-		},
-	}
-
-	for i, test := range tests {
-		path, _, err := client.IndexStats().Index(test.Indices...).Metric(test.Metrics...).buildURL()
-		if err != nil {
-			t.Fatalf("case #%d: %v", i+1, err)
-		}
-		if path != test.Expected {
-			t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
-		}
-	}
-}
-
-func TestIndexStats(t *testing.T) {
-	client := setupTestClientAndCreateIndexAndAddDocs(t)
-
-	stats, err := client.IndexStats(testIndexName).Do()
-	if err != nil {
-		t.Fatalf("expected no error; got: %v", err)
-	}
-	if stats == nil {
-		t.Fatalf("expected response; got: %v", stats)
-	}
-	stat, found := stats.Indices[testIndexName]
-	if !found {
-		t.Fatalf("expected stats about index %q; got: %v", testIndexName, found)
-	}
-	if stat.Total == nil {
-		t.Fatalf("expected total to be != nil; got: %v", stat.Total)
-	}
-	if stat.Total.Docs == nil {
-		t.Fatalf("expected total docs to be != nil; got: %v", stat.Total.Docs)
-	}
-	if stat.Total.Docs.Count == 0 {
-		t.Fatalf("expected total docs count to be > 0; got: %d", stat.Total.Docs.Count)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit.go b/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit.go
deleted file mode 100644
index 1330df1ee..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// InnerHit implements a simple join for parent/child, nested, and even
-// top-level documents in Elasticsearch.
-// It is an experimental feature for Elasticsearch versions 1.5 (or greater).
-// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html
-// for documentation.
-//
-// See the tests for SearchSource, HasChildFilter, HasChildQuery,
-// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery
-// for usage examples.
-type InnerHit struct {
-	source *SearchSource
-	path   string
-	typ    string
-
-	name string
-}
-
-// NewInnerHit creates a new InnerHit.
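A short usage sketch for InnerHit, assuming the NestedQuery.InnerHit setter referenced in the comment above and a hypothetical nested "comments" field; names and values are placeholders:

	ih := NewInnerHit().Name("top_comments").Size(3).Sort("comments.date", false)
	q := NewNestedQuery("comments", NewMatchQuery("comments.text", "nice")).InnerHit(ih)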
-func NewInnerHit() *InnerHit {
-	return &InnerHit{source: NewSearchSource()}
-}
-
-func (hit *InnerHit) Path(path string) *InnerHit {
-	hit.path = path
-	return hit
-}
-
-func (hit *InnerHit) Type(typ string) *InnerHit {
-	hit.typ = typ
-	return hit
-}
-
-func (hit *InnerHit) Query(query Query) *InnerHit {
-	hit.source.Query(query)
-	return hit
-}
-
-func (hit *InnerHit) From(from int) *InnerHit {
-	hit.source.From(from)
-	return hit
-}
-
-func (hit *InnerHit) Size(size int) *InnerHit {
-	hit.source.Size(size)
-	return hit
-}
-
-func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit {
-	hit.source.TrackScores(trackScores)
-	return hit
-}
-
-func (hit *InnerHit) Explain(explain bool) *InnerHit {
-	hit.source.Explain(explain)
-	return hit
-}
-
-func (hit *InnerHit) Version(version bool) *InnerHit {
-	hit.source.Version(version)
-	return hit
-}
-
-func (hit *InnerHit) Field(fieldName string) *InnerHit {
-	hit.source.Field(fieldName)
-	return hit
-}
-
-func (hit *InnerHit) Fields(fieldNames ...string) *InnerHit {
-	hit.source.Fields(fieldNames...)
-	return hit
-}
-
-func (hit *InnerHit) NoFields() *InnerHit {
-	hit.source.NoFields()
-	return hit
-}
-
-func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit {
-	hit.source.FetchSource(fetchSource)
-	return hit
-}
-
-func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit {
-	hit.source.FetchSourceContext(fetchSourceContext)
-	return hit
-}
-
-func (hit *InnerHit) FieldDataFields(fieldDataFields ...string) *InnerHit {
-	hit.source.FieldDataFields(fieldDataFields...)
-	return hit
-}
-
-func (hit *InnerHit) FieldDataField(fieldDataField string) *InnerHit {
-	hit.source.FieldDataField(fieldDataField)
-	return hit
-}
-
-func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit {
-	hit.source.ScriptFields(scriptFields...)
-	return hit
-}
-
-func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit {
-	hit.source.ScriptField(scriptField)
-	return hit
-}
-
-func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit {
-	hit.source.Sort(field, ascending)
-	return hit
-}
-
-func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit {
-	hit.source.SortWithInfo(info)
-	return hit
-}
-
-func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit {
-	hit.source.SortBy(sorter...)
-	return hit
-}
-
-func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit {
-	hit.source.Highlight(highlight)
-	return hit
-}
-
-func (hit *InnerHit) Highlighter() *Highlight {
-	return hit.source.Highlighter()
-}
-
-func (hit *InnerHit) Name(name string) *InnerHit {
-	hit.name = name
-	return hit
-}
-
-func (hit *InnerHit) Source() (interface{}, error) {
-	src, err := hit.source.Source()
-	if err != nil {
-		return nil, err
-	}
-	source, ok := src.(map[string]interface{})
-	if !ok {
-		return nil, nil
-	}
-
-	// Notice that hit.typ and hit.path are not exported here.
-	// They are only used with SearchSource and serialized there.
-
-	if hit.name != "" {
-		source["name"] = hit.name
-	}
-	return source, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit_test.go
deleted file mode 100644
index c4a74dafa..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/inner_hit_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"testing"
-)
-
-func TestInnerHitEmpty(t *testing.T) {
-	hit := NewInnerHit()
-	src, err := hit.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestInnerHitWithName(t *testing.T) {
-	hit := NewInnerHit().Name("comments")
-	src, err := hit.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"name":"comments"}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/logger.go b/services/templeton/vendor/src/github.com/olivere/elastic/logger.go
deleted file mode 100644
index 0fb16b19f..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/logger.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2012-2016 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// Logger specifies the interface for all log operations.
-type Logger interface {
-	Printf(format string, v ...interface{})
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/mget.go b/services/templeton/vendor/src/github.com/olivere/elastic/mget.go
deleted file mode 100644
index 6cc6b8d22..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/mget.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/url"
-)
-
-// MgetService allows getting multiple documents based on an index,
-// type (optional), and id (possibly with routing). The response includes
-// a docs array with all the fetched documents, each element similar
-// in structure to a document provided by the Get API.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html
-// for details.
-type MgetService struct {
-	client     *Client
-	pretty     bool
-	preference string
-	realtime   *bool
-	refresh    *bool
-	items      []*MultiGetItem
-}
-
-func NewMgetService(client *Client) *MgetService {
-	builder := &MgetService{
-		client: client,
-		items:  make([]*MultiGetItem, 0),
-	}
-	return builder
-}
-
-func (b *MgetService) Preference(preference string) *MgetService {
-	b.preference = preference
-	return b
-}
-
-func (b *MgetService) Refresh(refresh bool) *MgetService {
-	b.refresh = &refresh
-	return b
-}
-
-func (b *MgetService) Realtime(realtime bool) *MgetService {
-	b.realtime = &realtime
-	return b
-}
-
-// Pretty indicates that the JSON response should be indented and human readable.
-func (s *MgetService) Pretty(pretty bool) *MgetService {
-	s.pretty = pretty
-	return s
-}
-
-func (b *MgetService) Add(items ...*MultiGetItem) *MgetService {
-	b.items = append(b.items, items...)
-	return b
-}
-
-func (b *MgetService) Source() (interface{}, error) {
-	source := make(map[string]interface{})
-	items := make([]interface{}, len(b.items))
-	for i, item := range b.items {
-		src, err := item.Source()
-		if err != nil {
-			return nil, err
-		}
-		items[i] = src
-	}
-	source["docs"] = items
-	return source, nil
-}
-
-func (b *MgetService) Do() (*MgetResponse, error) {
-	// Build url
-	path := "/_mget"
-
-	params := make(url.Values)
-	if b.realtime != nil {
-		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
-	}
-	if b.preference != "" {
-		params.Add("preference", b.preference)
-	}
-	if b.refresh != nil {
-		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
-	}
-
-	// Set body
-	body, err := b.Source()
-	if err != nil {
-		return nil, err
-	}
-
-	// Get response
-	res, err := b.client.PerformRequest("GET", path, params, body)
-	if err != nil {
-		return nil, err
-	}
-
-	// Return result
-	ret := new(MgetResponse)
-	if err := json.Unmarshal(res.Body, ret); err != nil {
-		return nil, err
-	}
-	return ret, nil
-}
-
-// -- Multi Get Item --
-
-// MultiGetItem is a single document to retrieve via the MgetService.
-type MultiGetItem struct {
-	index       string
-	typ         string
-	id          string
-	routing     string
-	fields      []string
-	version     *int64 // see org.elasticsearch.common.lucene.uid.Versions
-	versionType string // see org.elasticsearch.index.VersionType
-	fsc         *FetchSourceContext
-}
-
-func NewMultiGetItem() *MultiGetItem {
-	return &MultiGetItem{}
-}
-
-func (item *MultiGetItem) Index(index string) *MultiGetItem {
-	item.index = index
-	return item
-}
-
-func (item *MultiGetItem) Type(typ string) *MultiGetItem {
-	item.typ = typ
-	return item
-}
-
-func (item *MultiGetItem) Id(id string) *MultiGetItem {
-	item.id = id
-	return item
-}
-
-func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
-	item.routing = routing
-	return item
-}
-
-func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem {
-	if item.fields == nil {
-		item.fields = make([]string, 0)
-	}
-	item.fields = append(item.fields, fields...)
-	return item
-}
-
-// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
-// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
-// The default in Elasticsearch is MatchAny (-3).
-func (item *MultiGetItem) Version(version int64) *MultiGetItem {
-	item.version = &version
-	return item
-}
-
-// VersionType can be "internal", "external", "external_gt", "external_gte",
-// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source.
-// It is "internal" by default.
-func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
-	item.versionType = versionType
-	return item
-}
-
-func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
-	item.fsc = fetchSourceContext
-	return item
-}
-
-// Source returns the serialized JSON to be sent to Elasticsearch as
-// part of a MultiGet search.
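As an illustration of the serialization implemented just below (index and type names are placeholders), an item built as

	item := NewMultiGetItem().Index("twitter").Type("tweet").Id("1")

should serialize to roughly {"_id":"1","_index":"twitter","_type":"tweet"}.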
-func (item *MultiGetItem) Source() (interface{}, error) {
-	source := make(map[string]interface{})
-
-	source["_id"] = item.id
-
-	if item.index != "" {
-		source["_index"] = item.index
-	}
-	if item.typ != "" {
-		source["_type"] = item.typ
-	}
-	if item.fsc != nil {
-		src, err := item.fsc.Source()
-		if err != nil {
-			return nil, err
-		}
-		source["_source"] = src
-	}
-	if item.fields != nil {
-		source["fields"] = item.fields
-	}
-	if item.routing != "" {
-		source["_routing"] = item.routing
-	}
-	if item.version != nil {
-		source["version"] = fmt.Sprintf("%d", *item.version)
-	}
-	if item.versionType != "" {
-		source["version_type"] = item.versionType
-	}
-
-	return source, nil
-}
-
-// -- Result of a Multi Get request.
-
-type MgetResponse struct {
-	Docs []*GetResult `json:"docs,omitempty"`
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/mget_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/mget_test.go
deleted file mode 100644
index da78e3122..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/mget_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"testing"
-)
-
-func TestMultiGet(t *testing.T) {
-	client := setupTestClientAndCreateIndex(t)
-
-	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
-	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
-	// Add some documents
-	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Flush().Index(testIndexName).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Count documents
-	count, err := client.Count(testIndexName).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if count != 3 {
-		t.Errorf("expected Count = %d; got %d", 3, count)
-	}
-
-	// Get documents 1 and 3
-	res, err := client.MultiGet().
-		Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")).
-		Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")).
-		Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if res == nil {
-		t.Fatal("expected result to be != nil; got nil")
-	}
-	if res.Docs == nil {
-		t.Fatal("expected result docs to be != nil; got nil")
-	}
-	if len(res.Docs) != 2 {
-		t.Fatalf("expected to have 2 docs; got %d", len(res.Docs))
-	}
-
-	item := res.Docs[0]
-	if item.Error != nil {
-		t.Errorf("expected no error on item 0; got %v", item.Error)
-	}
-	if item.Source == nil {
-		t.Errorf("expected Source != nil; got %v", item.Source)
-	}
-	var doc tweet
-	if err := json.Unmarshal(*item.Source, &doc); err != nil {
-		t.Fatalf("expected to unmarshal item Source; got %v", err)
-	}
-	if doc.Message != tweet1.Message {
-		t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message)
-	}
-
-	item = res.Docs[1]
-	if item.Error != nil {
-		t.Errorf("expected no error on item 1; got %v", item.Error)
-	}
-	if item.Source == nil {
-		t.Errorf("expected Source != nil; got %v", item.Source)
-	}
-	if err := json.Unmarshal(*item.Source, &doc); err != nil {
-		t.Fatalf("expected to unmarshal item Source; got %v", err)
-	}
-	if doc.Message != tweet3.Message {
-		t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/msearch.go b/services/templeton/vendor/src/github.com/olivere/elastic/msearch.go
deleted file mode 100644
index 2eb2b550e..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/msearch.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/url"
-	"strings"
-)
-
-// MultiSearch executes one or more searches in one roundtrip.
-// See http://www.elasticsearch.org/guide/reference/api/multi-search/
type MultiSearchService struct {
-	client     *Client
-	requests   []*SearchRequest
-	indices    []string
-	pretty     bool
-	routing    string
-	preference string
-}
-
-func NewMultiSearchService(client *Client) *MultiSearchService {
-	builder := &MultiSearchService{
-		client:   client,
-		requests: make([]*SearchRequest, 0),
-		indices:  make([]string, 0),
-	}
-	return builder
-}
-
-func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService {
-	s.requests = append(s.requests, requests...)
-	return s
-}
-
-func (s *MultiSearchService) Index(indices ...string) *MultiSearchService {
-	s.indices = append(s.indices, indices...)
-	return s
-}
-
-func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
-	s.pretty = pretty
-	return s
-}
-
-func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
-	// Build url
-	path := "/_msearch"
-
-	// Parameters
-	params := make(url.Values)
-	if s.pretty {
-		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
-	}
-
-	// Set body
-	lines := make([]string, 0)
-	for _, sr := range s.requests {
-		// Set default indices if not specified in the request
-		if !sr.HasIndices() && len(s.indices) > 0 {
-			sr = sr.Index(s.indices...)
-		}
-
-		header, err := json.Marshal(sr.header())
-		if err != nil {
-			return nil, err
-		}
-		body, err := json.Marshal(sr.body())
-		if err != nil {
-			return nil, err
-		}
-		lines = append(lines, string(header))
-		lines = append(lines, string(body))
-	}
-	body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n
-
-	// Get response
-	res, err := s.client.PerformRequest("GET", path, params, body)
-	if err != nil {
-		return nil, err
-	}
-
-	// Return result
-	ret := new(MultiSearchResult)
-	if err := json.Unmarshal(res.Body, ret); err != nil {
-		return nil, err
-	}
-	return ret, nil
-}
-
-type MultiSearchResult struct {
-	Responses []*SearchResult `json:"responses,omitempty"`
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/msearch_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/msearch_test.go
deleted file mode 100644
index 332ade2c6..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/msearch_test.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	_ "net/http"
-	"testing"
-)
-
-func TestMultiSearch(t *testing.T) {
-	client := setupTestClientAndCreateIndex(t)
-
-	tweet1 := tweet{
-		User:    "olivere",
-		Message: "Welcome to Golang and Elasticsearch.",
-		Tags:    []string{"golang", "elasticsearch"},
-	}
-	tweet2 := tweet{
-		User:    "olivere",
-		Message: "Another unrelated topic.",
-		Tags:    []string{"golang"},
-	}
-	tweet3 := tweet{
-		User:    "sandrae",
-		Message: "Cycling is fun.",
-		Tags:    []string{"sports", "cycling"},
-	}
-
-	// Add all documents
-	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Flush().Index(testIndexName).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Spawn two search queries with one roundtrip
-	q1 := NewMatchAllQuery()
-	q2 := NewTermQuery("tags", "golang")
-
-	sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2).
-		Source(NewSearchSource().Query(q1).Size(10))
-	sreq2 := NewSearchRequest().Index(testIndexName).Type("tweet").
-		Source(NewSearchSource().Query(q2))
-
-	searchResult, err := client.MultiSearch().
-		Add(sreq1, sreq2).
-		Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if searchResult.Responses == nil {
-		t.Fatal("expected responses != nil; got nil")
-	}
-	if len(searchResult.Responses) != 2 {
-		t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses))
-	}
-
-	sres := searchResult.Responses[0]
-	if sres.Hits == nil {
-		t.Errorf("expected Hits != nil; got nil")
-	}
-	if sres.Hits.TotalHits != 3 {
-		t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
-	}
-	if len(sres.Hits.Hits) != 3 {
-		t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
-	}
-	for _, hit := range sres.Hits.Hits {
-		if hit.Index != testIndexName {
-			t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
-		}
-		item := make(map[string]interface{})
-		err := json.Unmarshal(*hit.Source, &item)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	sres = searchResult.Responses[1]
-	if sres.Hits == nil {
-		t.Errorf("expected Hits != nil; got nil")
-	}
-	if sres.Hits.TotalHits != 2 {
-		t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits)
-	}
-	if len(sres.Hits.Hits) != 2 {
-		t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits))
-	}
-	for _, hit := range sres.Hits.Hits {
-		if hit.Index != testIndexName {
-			t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
-		}
-		item := make(map[string]interface{})
-		err := json.Unmarshal(*hit.Source, &item)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
-func TestMultiSearchWithOneRequest(t *testing.T) {
-	client := setupTestClientAndCreateIndex(t)
-
-	tweet1 := tweet{
-		User:    "olivere",
-		Message: "Welcome to Golang and Elasticsearch.",
-		Tags:    []string{"golang", "elasticsearch"},
-	}
-	tweet2 := tweet{
-		User:    "olivere",
-		Message: "Another unrelated topic.",
-		Tags:    []string{"golang"},
-	}
-	tweet3 := tweet{
-		User:    "sandrae",
-		Message: "Cycling is fun.",
-		Tags:    []string{"sports", "cycling"},
-	}
-
-	// Add all documents
-	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Flush().Index(testIndexName).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Spawn two search queries with one roundtrip
-	query := NewMatchAllQuery()
-	source := NewSearchSource().Query(query).Size(10)
-	sreq := NewSearchRequest().Source(source)
-
-	searchResult, err := client.MultiSearch().
-		Index(testIndexName).
-		Add(sreq).
-		Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if searchResult.Responses == nil {
-		t.Fatal("expected responses != nil; got nil")
-	}
-	if len(searchResult.Responses) != 1 {
-		t.Fatalf("expected 1 response; got %d", len(searchResult.Responses))
-	}
-
-	sres := searchResult.Responses[0]
-	if sres.Hits == nil {
-		t.Errorf("expected Hits != nil; got nil")
-	}
-	if sres.Hits.TotalHits != 3 {
-		t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
-	}
-	if len(sres.Hits.Hits) != 3 {
-		t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
-	}
-	for _, hit := range sres.Hits.Hits {
-		if hit.Index != testIndexName {
-			t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
-		}
-		item := make(map[string]interface{})
-		err := json.Unmarshal(*hit.Source, &item)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info.go b/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info.go
deleted file mode 100644
index 8a1c40fa9..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info.go
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"fmt"
-	"log"
-	"net/url"
-	"strings"
-	"time"
-
-	"gopkg.in/olivere/elastic.v3/uritemplates"
-)
-
-var (
-	_ = fmt.Print
-	_ = log.Print
-	_ = strings.Index
-	_ = uritemplates.Expand
-	_ = url.Parse
-)
-
-// NodesInfoService retrieves information about one, several, or all of the
-// cluster nodes.
-// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html.
-type NodesInfoService struct {
-	client       *Client
-	pretty       bool
-	nodeId       []string
-	metric       []string
-	flatSettings *bool
-	human        *bool
-}
-
-// NewNodesInfoService creates a new NodesInfoService.
-func NewNodesInfoService(client *Client) *NodesInfoService {
-	return &NodesInfoService{
-		client: client,
-		nodeId: []string{"_all"},
-		metric: []string{"_all"},
-	}
-}
-
-// NodeId is a list of node IDs or names to limit the returned information.
-// Use "_local" to return information from the node you're connecting to,
-// leave empty to get information from all nodes.
-func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
-	s.nodeId = append(s.nodeId, nodeId...)
-	return s
-}
-
-// Metric is a list of metrics you wish returned. Leave empty to return all.
-// Valid metrics are: settings, os, process, jvm, thread_pool, network,
-// transport, http, and plugins.
-func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
-	s.metric = append(s.metric, metric...)
-	return s
-}
-
-// FlatSettings returns settings in flat format (default: false).
-func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
-	s.flatSettings = &flatSettings
-	return s
-}
-
-// Human indicates whether to return time and byte values in human-readable format.
-func (s *NodesInfoService) Human(human bool) *NodesInfoService {
-	s.human = &human
-	return s
-}
-
-// Pretty indicates whether to indent the returned JSON.
-func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
-	s.pretty = pretty
-	return s
-}
-
-// buildURL builds the URL for the operation.
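With the defaults set in NewNodesInfoService, buildURL below should expand to GET /_nodes/_all/_all. Note that NodeId and Metric append to those "_all" defaults rather than replacing them, so narrowing a request effectively requires a freshly constructed service. A minimal sketch of the default call:

	info, err := client.NodesInfo().Do() // GET /_nodes/_all/_all
	if err == nil {
		for id, node := range info.Nodes {
			fmt.Println(id, node.Name, node.IP) // assumes fmt is imported
		}
	}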
-func (s *NodesInfoService) buildURL() (string, url.Values, error) {
-	// Build URL
-	path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
-		"node_id": strings.Join(s.nodeId, ","),
-		"metric":  strings.Join(s.metric, ","),
-	})
-	if err != nil {
-		return "", url.Values{}, err
-	}
-
-	// Add query string parameters
-	params := url.Values{}
-	if s.flatSettings != nil {
-		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
-	}
-	if s.human != nil {
-		params.Set("human", fmt.Sprintf("%v", *s.human))
-	}
-	if s.pretty {
-		params.Set("pretty", "1")
-	}
-	return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *NodesInfoService) Validate() error {
-	return nil
-}
-
-// Do executes the operation.
-func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
-	// Check pre-conditions
-	if err := s.Validate(); err != nil {
-		return nil, err
-	}
-
-	// Get URL for request
-	path, params, err := s.buildURL()
-	if err != nil {
-		return nil, err
-	}
-
-	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// Return operation response
-	ret := new(NodesInfoResponse)
-	if err := json.Unmarshal(res.Body, ret); err != nil {
-		return nil, err
-	}
-	return ret, nil
-}
-
-// NodesInfoResponse is the response of NodesInfoService.Do.
-type NodesInfoResponse struct {
-	ClusterName string `json:"cluster_name"`
-	Nodes map[string]*NodesInfoNode `json:"nodes"`
-}
-
-type NodesInfoNode struct {
-	// Name of the node, e.g. "Mister Fear"
-	Name string `json:"name"`
-	// TransportAddress, e.g. "127.0.0.1:9300"
-	TransportAddress string `json:"transport_address"`
-	// Host is the host name, e.g. "macbookair"
-	Host string `json:"host"`
-	// IP is the IP address, e.g. "192.168.1.2"
-	IP string `json:"ip"`
-	// Version is the Elasticsearch version running on the node, e.g. "1.4.3"
-	Version string `json:"version"`
-	// Build is the Elasticsearch build, e.g. "36a29a7"
-	Build string `json:"build"`
-	// HTTPAddress, e.g. "127.0.0.1:9200"
-	HTTPAddress string `json:"http_address"`
-	// HTTPSAddress, e.g. "127.0.0.1:9200"
-	HTTPSAddress string `json:"https_address"`
-
-	// Attributes of the node.
-	Attributes map[string]interface{} `json:"attributes"`
-
-	// Settings of the node, e.g. paths and pidfile.
-	Settings map[string]interface{} `json:"settings"`
-
-	// OS information, e.g. CPU and memory.
-	OS *NodesInfoNodeOS `json:"os"`
-
-	// Process information, e.g. max file descriptors.
-	Process *NodesInfoNodeProcess `json:"process"`
-
-	// JVM information, e.g. VM version.
-	JVM *NodesInfoNodeProcess `json:"jvm"`
-
-	// ThreadPool information.
-	ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
-
-	// Network information.
-	Network *NodesInfoNodeNetwork `json:"network"`
-
-	// Network information.
-	Transport *NodesInfoNodeTransport `json:"transport"`
-
-	// HTTP information.
-	HTTP *NodesInfoNodeHTTP `json:"http"`
-
-	// Plugins information.
-	Plugins []*NodesInfoNodePlugin `json:"plugins"`
-}
-
-type NodesInfoNodeOS struct {
-	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
-	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
-	AvailableProcessors     int    `json:"available_processors"`       // e.g. 4
-
-	// CPU information
-	CPU struct {
-		Vendor           string `json:"vendor"`              // e.g. Intel
-		Model            string `json:"model"`               // e.g. iMac15,1
-		MHz              int    `json:"mhz"`                 // e.g. 3500
-		TotalCores       int    `json:"total_cores"`         // e.g. 4
-		TotalSockets     int    `json:"total_sockets"`       // e.g. 4
-		CoresPerSocket   int    `json:"cores_per_socket"`    // e.g. 16
-		CacheSizeInBytes int    `json:"cache_size_in_bytes"` // e.g. 256
-	} `json:"cpu"`
-
-	// Mem information
-	Mem struct {
-		Total        string `json:"total"`          // e.g. 16gb
-		TotalInBytes int    `json:"total_in_bytes"` // e.g. 17179869184
-	} `json:"mem"`
-
-	// Swap information
-	Swap struct {
-		Total        string `json:"total"`          // e.g. 1gb
-		TotalInBytes int    `json:"total_in_bytes"` // e.g. 1073741824
-	} `json:"swap"`
-}
-
-type NodesInfoNodeProcess struct {
-	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
-	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
-	ID                      int    `json:"id"`                         // process id, e.g. 87079
-	MaxFileDescriptors      int    `json:"max_file_descriptors"`       // e.g. 32768
-	Mlockall                bool   `json:"mlockall"`                   // e.g. false
-}
-
-type NodesInfoNodeJVM struct {
-	PID               int       `json:"pid"`        // process id, e.g. 87079
-	Version           string    `json:"version"`    // e.g. "1.8.0_25"
-	VMName            string    `json:"vm_name"`    // e.g. "Java HotSpot(TM) 64-Bit Server VM"
-	VMVersion         string    `json:"vm_version"` // e.g. "25.25-b02"
-	VMVendor          string    `json:"vm_vendor"`  // e.g. "Oracle Corporation"
-	StartTime         time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z"
-	StartTimeInMillis int64     `json:"start_time_in_millis"`
-
-	// Mem information
-	Mem struct {
-		HeapInit           string `json:"heap_init"` // e.g. 1gb
-		HeapInitInBytes    int    `json:"heap_init_in_bytes"`
-		HeapMax            string `json:"heap_max"` // e.g. 4gb
-		HeapMaxInBytes     int    `json:"heap_max_in_bytes"`
-		NonHeapInit        string `json:"non_heap_init"` // e.g. 2.4mb
-		NonHeapInitInBytes int    `json:"non_heap_init_in_bytes"`
-		NonHeapMax         string `json:"non_heap_max"` // e.g. 0b
-		NonHeapMaxInBytes  int    `json:"non_heap_max_in_bytes"`
-		DirectMax          string `json:"direct_max"` // e.g. 4gb
-		DirectMaxInBytes   int    `json:"direct_max_in_bytes"`
-	} `json:"mem"`
-
-	GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"]
-	MemoryPools  []string `json:"memory_pools"`  // e.g. ["Code Cache", "Metaspace"]
-}
-
-type NodesInfoNodeThreadPool struct {
-	Percolate  *NodesInfoNodeThreadPoolSection `json:"percolate"`
-	Bench      *NodesInfoNodeThreadPoolSection `json:"bench"`
-	Listener   *NodesInfoNodeThreadPoolSection `json:"listener"`
-	Index      *NodesInfoNodeThreadPoolSection `json:"index"`
-	Refresh    *NodesInfoNodeThreadPoolSection `json:"refresh"`
-	Suggest    *NodesInfoNodeThreadPoolSection `json:"suggest"`
-	Generic    *NodesInfoNodeThreadPoolSection `json:"generic"`
-	Warmer     *NodesInfoNodeThreadPoolSection `json:"warmer"`
-	Search     *NodesInfoNodeThreadPoolSection `json:"search"`
-	Flush      *NodesInfoNodeThreadPoolSection `json:"flush"`
-	Optimize   *NodesInfoNodeThreadPoolSection `json:"optimize"`
-	Management *NodesInfoNodeThreadPoolSection `json:"management"`
-	Get        *NodesInfoNodeThreadPoolSection `json:"get"`
-	Merge      *NodesInfoNodeThreadPoolSection `json:"merge"`
-	Bulk       *NodesInfoNodeThreadPoolSection `json:"bulk"`
-	Snapshot   *NodesInfoNodeThreadPoolSection `json:"snapshot"`
-}
-
-type NodesInfoNodeThreadPoolSection struct {
-	Type      string      `json:"type"`       // e.g. fixed
-	Min       int         `json:"min"`        // e.g. 4
-	Max       int         `json:"max"`        // e.g. 4
-	KeepAlive string      `json:"keep_alive"` // e.g. "5m"
-	QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1
-}
-
-type NodesInfoNodeNetwork struct {
-	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
-	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
-	PrimaryInterface struct {
-		Address    string `json:"address"`     // e.g. 192.168.1.2
-		Name       string `json:"name"`        // e.g. en0
-		MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66
-	} `json:"primary_interface"`
-}
-
-type NodesInfoNodeTransport struct {
-	BoundAddress   []string `json:"bound_address"`
-	PublishAddress string   `json:"publish_address"`
-	Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"`
-}
-
-type NodesInfoNodeTransportProfile struct {
-	BoundAddress   []string `json:"bound_address"`
-	PublishAddress string   `json:"publish_address"`
-}
-
-type NodesInfoNodeHTTP struct {
-	BoundAddress            []string `json:"bound_address"`               // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"]
-	PublishAddress          string   `json:"publish_address"`             // e.g. "127.0.0.1:9300"
-	MaxContentLength        string   `json:"max_content_length"`          // e.g. "100mb"
-	MaxContentLengthInBytes int64    `json:"max_content_length_in_bytes"`
-}
-
-type NodesInfoNodePlugin struct {
-	Name        string `json:"name"`
-	Description string `json:"description"`
-	Site        bool   `json:"site"`
-	JVM         bool   `json:"jvm"`
-	URL         string `json:"url"` // e.g. /_plugin/dummy/
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info_test.go
deleted file mode 100644
index 0402b2706..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/nodes_info_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestNodesInfo(t *testing.T) {
-	client, err := NewClient()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	info, err := client.NodesInfo().Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if info == nil {
-		t.Fatal("expected nodes info")
-	}
-
-	if info.ClusterName == "" {
-		t.Errorf("expected cluster name; got: %q", info.ClusterName)
-	}
-	if len(info.Nodes) == 0 {
-		t.Errorf("expected some nodes; got: %d", len(info.Nodes))
-	}
-	for id, node := range info.Nodes {
-		if id == "" {
-			t.Errorf("expected node id; got: %q", id)
-		}
-		if node == nil {
-			t.Fatalf("expected node info; got: %v", node)
-		}
-		if node.IP == "" {
-			t.Errorf("expected node IP; got: %q", node.IP)
-		}
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/optimize.go b/services/templeton/vendor/src/github.com/olivere/elastic/optimize.go
deleted file mode 100644
index c9107f714..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/optimize.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/url"
-	"strings"
-
-	"gopkg.in/olivere/elastic.v3/uritemplates"
-)
-
-type OptimizeService struct {
-	client             *Client
-	indices            []string
-	maxNumSegments     *int
-	onlyExpungeDeletes *bool
-	flush              *bool
-	waitForMerge       *bool
-	force              *bool
-	pretty             bool
-}
-
-func NewOptimizeService(client *Client) *OptimizeService {
-	builder := &OptimizeService{
-		client:  client,
-		indices: make([]string, 0),
-	}
-	return builder
-}
-
-func (s *OptimizeService) Index(indices ...string) *OptimizeService {
-	s.indices = append(s.indices, indices...)
-	return s
-}
-
-func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService {
-	s.maxNumSegments = &maxNumSegments
-	return s
-}
-
-func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService {
-	s.onlyExpungeDeletes = &onlyExpungeDeletes
-	return s
-}
-
-func (s *OptimizeService) Flush(flush bool) *OptimizeService {
-	s.flush = &flush
-	return s
-}
-
-func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService {
-	s.waitForMerge = &waitForMerge
-	return s
-}
-
-func (s *OptimizeService) Force(force bool) *OptimizeService {
-	s.force = &force
-	return s
-}
-
-func (s *OptimizeService) Pretty(pretty bool) *OptimizeService {
-	s.pretty = pretty
-	return s
-}
-
-func (s *OptimizeService) Do() (*OptimizeResult, error) {
-	// Build url
-	path := "/"
-
-	// Indices part
-	indexPart := make([]string, 0)
-	for _, index := range s.indices {
-		index, err := uritemplates.Expand("{index}", map[string]string{
-			"index": index,
-		})
-		if err != nil {
-			return nil, err
-		}
-		indexPart = append(indexPart, index)
-	}
-	if len(indexPart) > 0 {
-		path += strings.Join(indexPart, ",")
-	}
-
-	path += "/_optimize"
-
-	// Parameters
-	params := make(url.Values)
-	if s.maxNumSegments != nil {
-		params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments))
-	}
-	if s.onlyExpungeDeletes != nil {
-		params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
-	}
-	if s.flush != nil {
-		params.Set("flush", fmt.Sprintf("%v", *s.flush))
-	}
-	if s.waitForMerge != nil {
-		params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge))
-	}
-	if s.force != nil {
-		params.Set("force", fmt.Sprintf("%v", *s.force))
-	}
-	if s.pretty {
-		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
-	}
-
-	// Get response
-	res, err := s.client.PerformRequest("POST", path, params, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// Return result
-	ret := new(OptimizeResult)
-	if err := json.Unmarshal(res.Body, ret); err != nil {
-		return nil, err
-	}
-	return ret, nil
-}
-
-// -- Result of an optimize request.
-
-type OptimizeResult struct {
-	Shards shardsInfo `json:"_shards,omitempty"`
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/optimize_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/optimize_test.go
deleted file mode 100644
index c47de3a94..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/optimize_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
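A minimal usage sketch for the service above; the index name is a placeholder, and the call chain mirrors the test below:

	res, err := client.Optimize("twitter").MaxNumSegments(1).WaitForMerge(true).Do()
	if err == nil && res != nil {
		fmt.Printf("shards: %+v\n", res.Shards) // assumes fmt is imported
	}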
-
-package elastic
-
-import (
-	"testing"
-)
-
-func TestOptimize(t *testing.T) {
-	client := setupTestClientAndCreateIndex(t)
-
-	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
-	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
-	// Add some documents
-	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = client.Flush().Index(testIndexName).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Optimize documents
-	res, err := client.Optimize(testIndexName, testIndexName2).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if res == nil {
-		t.Fatal("expected result; got nil")
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/percolate.go b/services/templeton/vendor/src/github.com/olivere/elastic/percolate.go
deleted file mode 100644
index a2bd14ba2..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/percolate.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/url"
-	"strings"
-
-	"gopkg.in/olivere/elastic.v3/uritemplates"
-)
-
-// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html.
-type PercolateService struct {
-	client              *Client
-	pretty              bool
-	index               string
-	typ                 string
-	id                  string
-	version             interface{}
-	versionType         string
-	routing             []string
-	preference          string
-	ignoreUnavailable   *bool
-	percolateIndex      string
-	percolatePreference string
-	percolateRouting    string
-	source              string
-	allowNoIndices      *bool
-	expandWildcards     string
-	percolateFormat     string
-	percolateType       string
-	bodyJson            interface{}
-	bodyString          string
-}
-
-// NewPercolateService creates a new PercolateService.
-func NewPercolateService(client *Client) *PercolateService {
-	return &PercolateService{
-		client:  client,
-		routing: make([]string, 0),
-	}
-}
-
-// Index is the name of the index of the document being percolated.
-func (s *PercolateService) Index(index string) *PercolateService {
-	s.index = index
-	return s
-}
-
-// Type is the type of the document being percolated.
-func (s *PercolateService) Type(typ string) *PercolateService {
-	s.typ = typ
-	return s
-}
-
-// Id substitutes the document in the request body with a stored
-// document identified by the specified id. The index and type
-// parameters are then used to retrieve that document from within
-// the cluster.
-func (s *PercolateService) Id(id string) *PercolateService {
-	s.id = id
-	return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expressions
-// to concrete indices that are open, closed or both.
-func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService {
-	s.expandWildcards = expandWildcards
-	return s
-}
-
-// PercolateFormat indicates whether to return an array of matching
-// query IDs instead of objects.
-func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService {
-	s.percolateFormat = percolateFormat
-	return s
-}
-
-// PercolateType is the type to percolate document into. Defaults to type.
-func (s *PercolateService) PercolateType(percolateType string) *PercolateService {
-	s.percolateType = percolateType
-	return s
-}
-
-// PercolateRouting is the routing value to use when percolating
-// the existing document.
-func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService {
-	s.percolateRouting = percolateRouting
-	return s
-}
-
-// Source is the URL-encoded request definition.
-func (s *PercolateService) Source(source string) *PercolateService {
-	s.source = source
-	return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService {
-	s.allowNoIndices = &allowNoIndices
-	return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should
-// be ignored when unavailable (missing or closed).
-func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService {
-	s.ignoreUnavailable = &ignoreUnavailable
-	return s
-}
-
-// PercolateIndex is the index to percolate the document into. Defaults to index.
-func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService {
-	s.percolateIndex = percolateIndex
-	return s
-}
-
-// PercolatePreference defines which shard to prefer when executing
-// the percolate request.
-func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService {
-	s.percolatePreference = percolatePreference
-	return s
-}
-
-// Version is an explicit version number for concurrency control.
-func (s *PercolateService) Version(version interface{}) *PercolateService {
-	s.version = version
-	return s
-}
-
-// VersionType is the specific version type.
-func (s *PercolateService) VersionType(versionType string) *PercolateService {
-	s.versionType = versionType
-	return s
-}
-
-// Routing is a list of specific routing values.
-func (s *PercolateService) Routing(routing []string) *PercolateService {
-	s.routing = routing
-	return s
-}
-
-// Preference specifies the node or shard the operation should be
-// performed on (default: random).
-func (s *PercolateService) Preference(preference string) *PercolateService {
-	s.preference = preference
-	return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *PercolateService) Pretty(pretty bool) *PercolateService {
-	s.pretty = pretty
-	return s
-}
-
-// Doc wraps the given document into the "doc" key of the body.
-func (s *PercolateService) Doc(doc interface{}) *PercolateService {
-	return s.BodyJson(map[string]interface{}{"doc": doc})
-}
-
-// BodyJson is the percolator request definition using the percolate DSL.
-func (s *PercolateService) BodyJson(body interface{}) *PercolateService {
-	s.bodyJson = body
-	return s
-}
-
-// BodyString is the percolator request definition using the percolate DSL.
-func (s *PercolateService) BodyString(body string) *PercolateService {
-	s.bodyString = body
-	return s
-}
-
-// buildURL builds the URL for the operation.
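For reference, the two path shapes buildURL produces below, depending on whether an Id was set:

	/{index}/{type}/_percolate       // percolate the document given in the request body
	/{index}/{type}/{id}/_percolate  // percolate a stored document by id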
-func (s *PercolateService) buildURL() (string, url.Values, error) {
-	// Build URL
-	var path string
-	var err error
-	if s.id == "" {
-		path, err = uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{
-			"index": s.index,
-			"type":  s.typ,
-		})
-	} else {
-		path, err = uritemplates.Expand("/{index}/{type}/{id}/_percolate", map[string]string{
-			"index": s.index,
-			"type":  s.typ,
-			"id":    s.id,
-		})
-	}
-	if err != nil {
-		return "", url.Values{}, err
-	}
-
-	// Add query string parameters
-	params := url.Values{}
-	if s.pretty {
-		params.Set("pretty", "1")
-	}
-	if s.version != nil {
-		params.Set("version", fmt.Sprintf("%v", s.version))
-	}
-	if s.versionType != "" {
-		params.Set("version_type", s.versionType)
-	}
-	if len(s.routing) > 0 {
-		params.Set("routing", strings.Join(s.routing, ","))
-	}
-	if s.preference != "" {
-		params.Set("preference", s.preference)
-	}
-	if s.ignoreUnavailable != nil {
-		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
-	}
-	if s.percolateIndex != "" {
-		params.Set("percolate_index", s.percolateIndex)
-	}
-	if s.percolatePreference != "" {
-		params.Set("percolate_preference", s.percolatePreference)
-	}
-	if s.percolateRouting != "" {
-		params.Set("percolate_routing", s.percolateRouting)
-	}
-	if s.source != "" {
-		params.Set("source", s.source)
-	}
-	if s.allowNoIndices != nil {
-		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
-	}
-	if s.expandWildcards != "" {
-		params.Set("expand_wildcards", s.expandWildcards)
-	}
-	if s.percolateFormat != "" {
-		params.Set("percolate_format", s.percolateFormat)
-	}
-	if s.percolateType != "" {
-		params.Set("percolate_type", s.percolateType)
-	}
-	return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *PercolateService) Validate() error {
-	var invalid []string
-	if s.index == "" {
-		invalid = append(invalid, "Index")
-	}
-	if s.typ == "" {
-		invalid = append(invalid, "Type")
-	}
-	if len(invalid) > 0 {
-		return fmt.Errorf("missing required fields: %v", invalid)
-	}
-	return nil
-}
-
-// Do executes the operation.
-func (s *PercolateService) Do() (*PercolateResponse, error) {
-	// Check pre-conditions
-	if err := s.Validate(); err != nil {
-		return nil, err
-	}
-
-	// Get URL for request
-	path, params, err := s.buildURL()
-	if err != nil {
-		return nil, err
-	}
-
-	// Setup HTTP request body
-	var body interface{}
-	if s.bodyJson != nil {
-		body = s.bodyJson
-	} else {
-		body = s.bodyString
-	}
-
-	// Get HTTP response
-	res, err := s.client.PerformRequest("GET", path, params, body)
-	if err != nil {
-		return nil, err
-	}
-
-	// Return operation response
-	ret := new(PercolateResponse)
-	if err := json.Unmarshal(res.Body, ret); err != nil {
-		return nil, err
-	}
-	return ret, nil
-}
-
-// PercolateResponse is the response of PercolateService.Do.
-type PercolateResponse struct {
-	TookInMillis int64             `json:"took"`              // search time in milliseconds
-	Total        int64             `json:"total"`             // total matches
-	Matches      []*PercolateMatch `json:"matches,omitempty"`
-	Aggregations Aggregations      `json:"aggregations,omitempty"` // results from aggregations
-}
-
-// PercolateMatch returns a single match in a PercolateResponse.
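A typical way to consume a response (sketch; index, type, and doc are placeholders, and the chain mirrors the test below):

	res, err := client.Percolate().Index("twitter").Type("tweet").Doc(doc).Do()
	if err == nil {
		for _, m := range res.Matches {
			fmt.Printf("query %s matched (score %.2f)\n", m.Id, m.Score) // assumes fmt is imported
		}
	}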
-type PercolateMatch struct {
-	Index string  `json:"_index,omitempty"`
-	Id    string  `json:"_id"`
-	Score float64 `json:"_score,omitempty"`
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/percolate_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/percolate_test.go
deleted file mode 100644
index 07b36fef7..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/percolate_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import "testing"
-
-func TestPercolate(t *testing.T) {
-	client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
-
-	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
-
-	// Add a document
-	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Register a query in the ".percolator" type.
-	search := NewSearchSource().Query(NewMatchQuery("message", "Golang"))
-	searchSrc, err := search.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = client.Index().
-		Index(testIndexName).Type(".percolator").Id("1").
-		BodyJson(searchSrc).
-		Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Percolate should return our registered query
-	newTweet := tweet{User: "olivere", Message: "Golang is fun."}
-	res, err := client.Percolate().
-		Index(testIndexName).Type("tweet").
-		Doc(newTweet). // shortcut for: BodyJson(map[string]interface{}{"doc": newTweet}).
-		Pretty(true).
-		Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if res == nil {
-		t.Errorf("expected results != nil; got nil")
-	}
-	if res.Total != 1 {
-		t.Fatalf("expected 1 result; got: %d", res.Total)
-	}
-	if res.Matches == nil {
-		t.Fatalf("expected Matches; got: %v", res.Matches)
-	}
-	matches := res.Matches
-	if matches == nil {
-		t.Fatalf("expected matches as map; got: %v", matches)
-	}
-	if len(matches) != 1 {
-		t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
-	}
-	if matches[0].Id != "1" {
-		t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
-	}
-
-	// Percolating an existing document should return our registered query
-	res, err = client.Percolate().
-		Index(testIndexName).Type("tweet").
-		Id("1").
-		Pretty(true).
-		Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if res == nil {
-		t.Errorf("expected results != nil; got nil")
-	}
-	if res.Total != 1 {
-		t.Fatalf("expected 1 result; got: %d", res.Total)
-	}
-	if res.Matches == nil {
-		t.Fatalf("expected Matches; got: %v", res.Matches)
-	}
-	matches = res.Matches
-	if matches == nil {
-		t.Fatalf("expected matches as map; got: %v", matches)
-	}
-	if len(matches) != 1 {
-		t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
-	}
-	if matches[0].Id != "1" {
-		t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/ping.go b/services/templeton/vendor/src/github.com/olivere/elastic/ping.go
deleted file mode 100644
index fada22817..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/ping.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"net/http"
-	"net/url"
-)
-
-// PingService checks if an Elasticsearch server on a given URL is alive.
-// When asked for, it can also return various details about the
-// Elasticsearch server, e.g. the Elasticsearch version number.
-//
-// Ping simply sends an HTTP GET request to the URL of the server.
-// If the server responds with HTTP Status code 200 OK, the server is alive.
-type PingService struct {
-	client       *Client
-	url          string
-	timeout      string
-	httpHeadOnly bool
-	pretty       bool
-}
-
-// PingResult is the result returned from querying the Elasticsearch server.
-type PingResult struct {
-	Name        string `json:"name"`
-	ClusterName string `json:"cluster_name"`
-	Version struct {
-		Number         string `json:"number"`
-		BuildHash      string `json:"build_hash"`
-		BuildTimestamp string `json:"build_timestamp"`
-		BuildSnapshot  bool   `json:"build_snapshot"`
-		LuceneVersion  string `json:"lucene_version"`
-	} `json:"version"`
-	TagLine string `json:"tagline"`
-}
-
-func NewPingService(client *Client) *PingService {
-	return &PingService{
-		client:       client,
-		url:          DefaultURL,
-		httpHeadOnly: false,
-		pretty:       false,
-	}
-}
-
-func (s *PingService) URL(url string) *PingService {
-	s.url = url
-	return s
-}
-
-func (s *PingService) Timeout(timeout string) *PingService {
-	s.timeout = timeout
-	return s
-}
-
-// HttpHeadOnly makes the service return only the HTTP status code in Do;
-// the PingResult will be nil.
-func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
-	s.httpHeadOnly = httpHeadOnly
-	return s
-}
-
-func (s *PingService) Pretty(pretty bool) *PingService {
-	s.pretty = pretty
-	return s
-}
-
-// Do returns the PingResult, the HTTP status code of the Elasticsearch
-// server, and an error.
-func (s *PingService) Do() (*PingResult, int, error) {
-	s.client.mu.RLock()
-	basicAuth := s.client.basicAuth
-	basicAuthUsername := s.client.basicAuthUsername
-	basicAuthPassword := s.client.basicAuthPassword
-	s.client.mu.RUnlock()
-
-	url_ := s.url + "/"
-
-	params := make(url.Values)
-	if s.timeout != "" {
-		params.Set("timeout", s.timeout)
-	}
-	if s.pretty {
-		params.Set("pretty", "1")
-	}
-	if len(params) > 0 {
-		url_ += "?" + params.Encode()
-	}
-
-	var method string
-	if s.httpHeadOnly {
-		method = "HEAD"
-	} else {
-		method = "GET"
-	}
-
-	// Notice: This service must NOT use PerformRequest!
-	req, err := NewRequest(method, url_)
-	if err != nil {
-		return nil, 0, err
-	}
-
-	if basicAuth {
-		req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
-	}
-
-	res, err := s.client.c.Do((*http.Request)(req))
-	if err != nil {
-		return nil, 0, err
-	}
-	defer res.Body.Close()
-
-	var ret *PingResult
-	if !s.httpHeadOnly {
-		ret = new(PingResult)
-		if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
-			return nil, res.StatusCode, err
-		}
-	}
-
-	return ret, res.StatusCode, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/ping_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/ping_test.go
deleted file mode 100644
index 9891c2025..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/ping_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
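A sketch of the two modes of PingService above: a full GET that decodes a PingResult, and a HEAD-only liveness probe; the timeout value is a placeholder:

	res, code, err := client.Ping(DefaultURL).Timeout("2s").Do()          // GET, res holds version info
	_, code, err = client.Ping(DefaultURL).HttpHeadOnly(true).Do()        // HEAD, res is nil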
- -package elastic - -import ( - "net/http" - "testing" -) - -func TestPingGet(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - res, code, err := client.Ping(DefaultURL).Do() - if err != nil { - t.Fatal(err) - } - if code != http.StatusOK { - t.Errorf("expected status code = %d; got %d", http.StatusOK, code) - } - if res == nil { - t.Fatalf("expected to return result, got: %v", res) - } - if res.Name == "" { - t.Errorf("expected Name != \"\"; got %q", res.Name) - } - if res.Version.Number == "" { - t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number) - } -} - -func TestPingHead(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - res, code, err := client.Ping(DefaultURL).HttpHeadOnly(true).Do() - if err != nil { - t.Fatal(err) - } - if code != http.StatusOK { - t.Errorf("expected status code = %d; got %d", http.StatusOK, code) - } - if res != nil { - t.Errorf("expected not to return result, got: %v", res) - } -} - -func TestPingHeadFailure(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - res, code, err := client. - Ping("http://127.0.0.1:9299"). - HttpHeadOnly(true). - Do() - if err == nil { - t.Error("expected error, got nil") - } - if code == http.StatusOK { - t.Errorf("expected status code != %d; got %d", http.StatusOK, code) - } - if res != nil { - t.Errorf("expected not to return result, got: %v", res) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/plugins.go b/services/templeton/vendor/src/github.com/olivere/elastic/plugins.go deleted file mode 100644 index 3906d74d7..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/plugins.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// HasPlugin indicates whether the cluster has the named plugin. -func (c *Client) HasPlugin(name string) (bool, error) { - plugins, err := c.Plugins() - if err != nil { - return false, nil - } - for _, plugin := range plugins { - if plugin == name { - return true, nil - } - } - return false, nil -} - -// Plugins returns the list of all registered plugins. -func (c *Client) Plugins() ([]string, error) { - stats, err := c.ClusterStats().Do() - if err != nil { - return nil, err - } - if stats == nil { - return nil, err - } - if stats.Nodes == nil { - return nil, err - } - var plugins []string - for _, plugin := range stats.Nodes.Plugins { - plugins = append(plugins, plugin.Name) - } - return plugins, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/plugins_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/plugins_test.go deleted file mode 100644 index 112b80943..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/plugins_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
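Note, in passing, that the deleted HasPlugin above swallows the error from Plugins (it returns `false, nil` on failure), so callers could not distinguish "plugin absent" from "cluster stats unavailable". A minimal sketch of the call, with "analysis-icu" as a placeholder plugin name:

    func pluginSketch(client *elastic.Client) error {
        // HasPlugin walks the node plugins reported by ClusterStats().Do().
        found, err := client.HasPlugin("analysis-icu")
        if err != nil {
            return err
        }
        fmt.Println("plugin installed:", found)
        return nil
    }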
- -package elastic - -import "testing" - -func TestClientPlugins(t *testing.T) { - client, err := NewClient() - if err != nil { - t.Fatal(err) - } - _, err = client.Plugins() - if err != nil { - t.Fatal(err) - } -} - -func TestClientHasPlugin(t *testing.T) { - client, err := NewClient() - if err != nil { - t.Fatal(err) - } - found, err := client.HasPlugin("no-such-plugin") - if err != nil { - t.Fatal(err) - } - if found { - t.Fatalf("expected to not find plugin %q", "no-such-plugin") - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/query.go b/services/templeton/vendor/src/github.com/olivere/elastic/query.go deleted file mode 100644 index 0869eaecc..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/query.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// Query represents the generic query interface. A query's sole purpose -// is to return the source of the query as a JSON-serializable object. -// Returning map[string]interface{} is the norm for queries. -type Query interface { - // Source returns the JSON-serializable query request. - Source() (interface{}, error) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/reindexer.go b/services/templeton/vendor/src/github.com/olivere/elastic/reindexer.go deleted file mode 100644 index 7193a1337..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/reindexer.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "errors" -) - -// Reindexer simplifies the process of reindexing an index. You typically -// reindex a source index to a target index. However, you can also specify -// a query that filters out documents from the source index before bulk -// indexing them into the target index. The caller may also specify a -// different client for the target, e.g. when copying indices from one -// Elasticsearch cluster to another. -// -// Internally, the Reindex users a scan and scroll operation on the source -// index and bulk indexing to push data into the target index. -// -// By default the reindexer fetches the _source, _parent, and _routing -// attributes from the source index, using the provided CopyToTargetIndex -// will copy those attributes into the destinationIndex. -// This behaviour can be overridden by setting the ScanFields and providing a -// custom ReindexerFunc. -// -// The caller is responsible for setting up and/or clearing the target index -// before starting the reindex process. -// -// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html -// for more information about reindexing. -type Reindexer struct { - sourceClient, targetClient *Client - sourceIndex string - query Query - scanFields []string - bulkSize int - size int - scroll string - reindexerFunc ReindexerFunc - progress ReindexerProgressFunc - statsOnly bool -} - -// A ReindexerFunc receives each hit from the sourceIndex. -// It can choose to add any number of BulkableRequests to the bulkService. 
-type ReindexerFunc func(hit *SearchHit, bulkService *BulkService) error - -// CopyToTargetIndex returns a ReindexerFunc that copies the SearchHit's -// _source, _parent, and _routing attributes into the targetIndex -func CopyToTargetIndex(targetIndex string) ReindexerFunc { - return func(hit *SearchHit, bulkService *BulkService) error { - // TODO(oe) Do we need to deserialize here? - source := make(map[string]interface{}) - if err := json.Unmarshal(*hit.Source, &source); err != nil { - return err - } - req := NewBulkIndexRequest().Index(targetIndex).Type(hit.Type).Id(hit.Id).Doc(source) - if hit.Parent != "" { - req = req.Parent(hit.Parent) - } - if hit.Routing != "" { - req = req.Routing(hit.Routing) - } - bulkService.Add(req) - return nil - } -} - -// ReindexerProgressFunc is a callback that can be used with Reindexer -// to report progress while reindexing data. -type ReindexerProgressFunc func(current, total int64) - -// ReindexerResponse is returned from the Do func in a Reindexer. -// By default, it returns the number of succeeded and failed bulk operations. -// To return details about all failed items, set StatsOnly to false in -// Reindexer. -type ReindexerResponse struct { - Success int64 - Failed int64 - Errors []*BulkResponseItem -} - -// NewReindexer returns a new Reindexer. -func NewReindexer(client *Client, source string, reindexerFunc ReindexerFunc) *Reindexer { - return &Reindexer{ - sourceClient: client, - sourceIndex: source, - reindexerFunc: reindexerFunc, - statsOnly: true, - } -} - -// TargetClient specifies a different client for the target. This is -// necessary when the target index is in a different Elasticsearch cluster. -// By default, the source and target clients are the same. -func (ix *Reindexer) TargetClient(c *Client) *Reindexer { - ix.targetClient = c - return ix -} - -// Query specifies the query to apply to the source. It filters out those -// documents to be indexed into target. A nil query does not filter out any -// documents. -func (ix *Reindexer) Query(q Query) *Reindexer { - ix.query = q - return ix -} - -// ScanFields specifies the fields the scan query should load. -// The default fields are _source, _parent, _routing. -func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer { - ix.scanFields = scanFields - return ix -} - -// BulkSize returns the number of documents to send to Elasticsearch per chunk. -// The default is 500. -func (ix *Reindexer) BulkSize(bulkSize int) *Reindexer { - ix.bulkSize = bulkSize - return ix -} - -// Size is the number of results to return per shard, not per request. -// So a size of 10 which hits 5 shards will return a maximum of 50 results -// per scan request. -func (ix *Reindexer) Size(size int) *Reindexer { - ix.size = size - return ix -} - -// Scroll specifies for how long the scroll operation on the source index -// should be maintained. The default is 5m. -func (ix *Reindexer) Scroll(timeout string) *Reindexer { - ix.scroll = timeout - return ix -} - -// Progress indicates a callback that will be called while indexing. -func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer { - ix.progress = f - return ix -} - -// StatsOnly indicates whether the Do method should return details e.g. about -// the documents that failed while indexing. It is true by default, i.e. only -// the number of documents that succeeded/failed are returned. Set to false -// if you want all the details. 
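Pulling the builder methods above together (Do itself follows just below): a minimal end-to-end sketch with placeholder index names. Per the type's doc comment, the caller must prepare the target index first, and the filter query is optional:

    func reindexSketch(client *elastic.Client) error {
        r := elastic.NewReindexer(client, "tweets-v1", elastic.CopyToTargetIndex("tweets-v2")).
            Query(elastic.NewTermQuery("user", "olivere")). // optional source filter
            BulkSize(500).                                  // flush every 500 docs (the default)
            Progress(func(current, total int64) {
                fmt.Printf("reindexed %d/%d\n", current, total)
            })
        ret, err := r.Do()
        if err != nil {
            return err
        }
        fmt.Printf("succeeded: %d, failed: %d\n", ret.Success, ret.Failed)
        return nil
    }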
-func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer { - ix.statsOnly = statsOnly - return ix -} - -// Do starts the reindexing process. -func (ix *Reindexer) Do() (*ReindexerResponse, error) { - if ix.sourceClient == nil { - return nil, errors.New("no source client") - } - if ix.sourceIndex == "" { - return nil, errors.New("no source index") - } - if ix.targetClient == nil { - ix.targetClient = ix.sourceClient - } - if ix.scanFields == nil { - ix.scanFields = []string{"_source", "_parent", "_routing"} - } - if ix.bulkSize <= 0 { - ix.bulkSize = 500 - } - if ix.scroll == "" { - ix.scroll = "5m" - } - - // Count total to report progress (if necessary) - var err error - var current, total int64 - if ix.progress != nil { - total, err = ix.count() - if err != nil { - return nil, err - } - } - - // Prepare scan and scroll to iterate through the source index - scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll).Fields(ix.scanFields...) - if ix.query != nil { - scanner = scanner.Query(ix.query) - } - if ix.size > 0 { - scanner = scanner.Size(ix.size) - } - cursor, err := scanner.Do() - - bulk := ix.targetClient.Bulk() - - ret := &ReindexerResponse{ - Errors: make([]*BulkResponseItem, 0), - } - - // Main loop iterates through the source index and bulk indexes into target. - for { - docs, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - return ret, err - } - - if docs.TotalHits() > 0 { - for _, hit := range docs.Hits.Hits { - if ix.progress != nil { - current++ - ix.progress(current, total) - } - - err := ix.reindexerFunc(hit, bulk) - if err != nil { - return ret, err - } - - if bulk.NumberOfActions() >= ix.bulkSize { - bulk, err = ix.commit(bulk, ret) - if err != nil { - return ret, err - } - } - } - } - } - - // Final flush - if bulk.NumberOfActions() > 0 { - bulk, err = ix.commit(bulk, ret) - if err != nil { - return ret, err - } - bulk = nil - } - - return ret, nil -} - -// count returns the number of documents in the source index. -// The query is taken into account, if specified. -func (ix *Reindexer) count() (int64, error) { - service := ix.sourceClient.Count(ix.sourceIndex) - if ix.query != nil { - service = service.Query(ix.query) - } - return service.Do() -} - -// commit commits a bulk, updates the stats, and returns a fresh bulk service. -func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) { - bres, err := bulk.Do() - if err != nil { - return nil, err - } - ret.Success += int64(len(bres.Succeeded())) - failed := bres.Failed() - ret.Failed += int64(len(failed)) - if !ix.statsOnly { - ret.Errors = append(ret.Errors, failed...) 
- } - bulk = ix.targetClient.Bulk() - return bulk, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/reindexer_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/reindexer_test.go deleted file mode 100644 index a21dff5c5..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/reindexer_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestReindexer(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - sourceCount, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - targetCount, err := client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != 0 { - t.Fatalf("expected %d documents; got: %d", 0, targetCount) - } - - r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - if _, err := client.Flush().Index(testIndexName2).Do(); err != nil { - t.Fatal(err) - } - - targetCount, err = client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != sourceCount { - t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) - } -} - -func TestReindexerWithQuery(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - q := NewTermQuery("user", "olivere") - - sourceCount, err := client.Count(testIndexName).Query(q).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - targetCount, err := client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != 0 { - t.Fatalf("expected %d documents; got: %d", 0, targetCount) - } - - r := NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) - r = r.Query(q) - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - if _, err := client.Flush().Index(testIndexName2).Do(); err != nil { - t.Fatal(err) - } - - targetCount, err = client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != sourceCount { - t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) - } -} - -func TestReindexerProgress(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - sourceCount, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - var calls int64 - totalsOk := true - progress := func(current, total int64) { - calls += 1 - totalsOk = totalsOk && total == sourceCount - } - - r := 
NewReindexer(client, testIndexName, CopyToTargetIndex(testIndexName2)) - r = r.Progress(progress) - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - if calls != sourceCount { - t.Errorf("expected progress to be called %d times; got: %d", sourceCount, calls) - } - if !totalsOk { - t.Errorf("expected totals in progress to be %d", sourceCount) - } -} - -func TestReindexerWithTargetClient(t *testing.T) { - sourceClient := setupTestClientAndCreateIndexAndAddDocs(t) - targetClient, err := NewClient() - if err != nil { - t.Fatal(err) - } - - sourceCount, err := sourceClient.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - targetCount, err := targetClient.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != 0 { - t.Fatalf("expected %d documents; got: %d", 0, targetCount) - } - - r := NewReindexer(sourceClient, testIndexName, CopyToTargetIndex(testIndexName2)) - r = r.TargetClient(targetClient) - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - if _, err := targetClient.Flush().Index(testIndexName2).Do(); err != nil { - t.Fatal(err) - } - - targetCount, err = targetClient.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != sourceCount { - t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount) - } -} - -// TestReindexerPreservingTTL shows how a caller can take control of the -// copying process by providing ScanFields and a custom ReindexerFunc. 
-func TestReindexerPreservingTTL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").TTL("999999s").Version(10).VersionType("external").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - sourceCount, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - if sourceCount <= 0 { - t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount) - } - - targetCount, err := client.Count(testIndexName2).Do() - if err != nil { - t.Fatal(err) - } - if targetCount != 0 { - t.Fatalf("expected %d documents; got: %d", 0, targetCount) - } - - // Carries over the source item's ttl to the reindexed item - copyWithTTL := func(hit *SearchHit, bulkService *BulkService) error { - source := make(map[string]interface{}) - if err := json.Unmarshal(*hit.Source, &source); err != nil { - return err - } - req := NewBulkIndexRequest().Index(testIndexName2).Type(hit.Type).Id(hit.Id).Doc(source) - if hit.TTL > 0 { - req = req.Ttl(hit.TTL) - } - bulkService.Add(req) - return nil - } - - r := NewReindexer(client, testIndexName, copyWithTTL).ScanFields("_source", "_ttl") - - ret, err := r.Do() - if err != nil { - t.Fatal(err) - } - if ret == nil { - t.Fatalf("expected result != %v; got: %v", nil, ret) - } - if ret.Success != sourceCount { - t.Errorf("expected success = %d; got: %d", sourceCount, ret.Success) - } - if ret.Failed != 0 { - t.Errorf("expected failed = %d; got: %d", 0, ret.Failed) - } - if len(ret.Errors) != 0 { - t.Errorf("expected to return no errors by default; got: %v", ret.Errors) - } - - getResult, err := client.Get().Index(testIndexName2).Id("1").Fields("_source", "_ttl").Do() - if err != nil { - t.Fatal(err) - } - - if getResult.TTL <= 0 { - t.Errorf("expected TTL field in reindexed document") - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/request.go b/services/templeton/vendor/src/github.com/olivere/elastic/request.go deleted file mode 100644 index 1347e1b6f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/request.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "runtime" - "strings" -) - -// Elasticsearch-specific HTTP request -type Request http.Request - -// NewRequest is a http.Request and adds features such as encoding the body. -func NewRequest(method, url string) (*Request, error) { - req, err := http.NewRequest(method, url, nil) - if err != nil { - return nil, err - } - req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")") - req.Header.Add("Accept", "application/json") - return (*Request)(req), nil -} - -// SetBasicAuth wraps http.Request's SetBasicAuth. -func (r *Request) SetBasicAuth(username, password string) { - ((*http.Request)(r)).SetBasicAuth(username, password) -} - -// SetBody encodes the body in the request. Optionally, it performs GZIP compression. 
-func (r *Request) SetBody(body interface{}, gzipCompress bool) error { - switch b := body.(type) { - case string: - if gzipCompress { - return r.setBodyGzip(b) - } else { - return r.setBodyString(b) - } - default: - if gzipCompress { - return r.setBodyGzip(body) - } else { - return r.setBodyJson(body) - } - } -} - -// setBodyJson encodes the body as a struct to be marshaled via json.Marshal. -func (r *Request) setBodyJson(data interface{}) error { - body, err := json.Marshal(data) - if err != nil { - return err - } - r.Header.Set("Content-Type", "application/json") - r.setBodyReader(bytes.NewReader(body)) - return nil -} - -// setBodyString encodes the body as a string. -func (r *Request) setBodyString(body string) error { - return r.setBodyReader(strings.NewReader(body)) -} - -// setBodyGzip gzip's the body. It accepts both strings and structs as body. -// The latter will be encoded via json.Marshal. -func (r *Request) setBodyGzip(body interface{}) error { - switch b := body.(type) { - case string: - buf := new(bytes.Buffer) - w := gzip.NewWriter(buf) - if _, err := w.Write([]byte(b)); err != nil { - return err - } - if err := w.Close(); err != nil { - return err - } - r.Header.Add("Content-Encoding", "gzip") - r.Header.Add("Vary", "Accept-Encoding") - return r.setBodyReader(bytes.NewReader(buf.Bytes())) - default: - data, err := json.Marshal(b) - if err != nil { - return err - } - buf := new(bytes.Buffer) - w := gzip.NewWriter(buf) - if _, err := w.Write(data); err != nil { - return err - } - if err := w.Close(); err != nil { - return err - } - r.Header.Add("Content-Encoding", "gzip") - r.Header.Add("Vary", "Accept-Encoding") - r.Header.Set("Content-Type", "application/json") - return r.setBodyReader(bytes.NewReader(buf.Bytes())) - } -} - -// setBodyReader writes the body from an io.Reader. -func (r *Request) setBodyReader(body io.Reader) error { - rc, ok := body.(io.ReadCloser) - if !ok && body != nil { - rc = ioutil.NopCloser(body) - } - r.Body = rc - if body != nil { - switch v := body.(type) { - case *strings.Reader: - r.ContentLength = int64(v.Len()) - case *bytes.Buffer: - r.ContentLength = int64(v.Len()) - } - } - return nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/rescore.go b/services/templeton/vendor/src/github.com/olivere/elastic/rescore.go deleted file mode 100644 index 0cbc06710..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/rescore.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
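Request and SetBody, removed above, are the low-level plumbing beneath Client.PerformRequest; few callers touch them directly. A minimal sketch of the gzip path, with a placeholder URL:

    func requestSketch() error {
        req, err := elastic.NewRequest("PUT", "http://127.0.0.1:9200/tweets/tweet/1")
        if err != nil {
            return err
        }
        // A non-string body is JSON-marshaled, then gzip-compressed; SetBody also
        // sets the Content-Type, Content-Encoding, and Vary headers.
        return req.SetBody(map[string]interface{}{"user": "olivere"}, true)
    }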
- -package elastic - -type Rescore struct { - rescorer Rescorer - windowSize *int - defaultRescoreWindowSize *int -} - -func NewRescore() *Rescore { - return &Rescore{} -} - -func (r *Rescore) WindowSize(windowSize int) *Rescore { - r.windowSize = &windowSize - return r -} - -func (r *Rescore) IsEmpty() bool { - return r.rescorer == nil -} - -func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore { - r.rescorer = rescorer - return r -} - -func (r *Rescore) Source() (interface{}, error) { - source := make(map[string]interface{}) - if r.windowSize != nil { - source["window_size"] = *r.windowSize - } else if r.defaultRescoreWindowSize != nil { - source["window_size"] = *r.defaultRescoreWindowSize - } - rescorerSrc, err := r.rescorer.Source() - if err != nil { - return nil, err - } - source[r.rescorer.Name()] = rescorerSrc - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/rescorer.go b/services/templeton/vendor/src/github.com/olivere/elastic/rescorer.go deleted file mode 100644 index 28ad59cbb..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/rescorer.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -type Rescorer interface { - Name() string - Source() (interface{}, error) -} - -// -- Query Rescorer -- - -type QueryRescorer struct { - query Query - rescoreQueryWeight *float64 - queryWeight *float64 - scoreMode string -} - -func NewQueryRescorer(query Query) *QueryRescorer { - return &QueryRescorer{ - query: query, - } -} - -func (r *QueryRescorer) Name() string { - return "query" -} - -func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer { - r.rescoreQueryWeight = &rescoreQueryWeight - return r -} - -func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer { - r.queryWeight = &queryWeight - return r -} - -func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer { - r.scoreMode = scoreMode - return r -} - -func (r *QueryRescorer) Source() (interface{}, error) { - rescoreQuery, err := r.query.Source() - if err != nil { - return nil, err - } - - source := make(map[string]interface{}) - source["rescore_query"] = rescoreQuery - if r.queryWeight != nil { - source["query_weight"] = *r.queryWeight - } - if r.rescoreQueryWeight != nil { - source["rescore_query_weight"] = *r.rescoreQueryWeight - } - if r.scoreMode != "" { - source["score_mode"] = r.scoreMode - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/response.go b/services/templeton/vendor/src/github.com/olivere/elastic/response.go deleted file mode 100644 index 9426c23af..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/response.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "io/ioutil" - "net/http" -) - -// Response represents a response from Elasticsearch. -type Response struct { - // StatusCode is the HTTP status code, e.g. 200. - StatusCode int - // Header is the HTTP header from the HTTP response. - // Keys in the map are canonicalized (see http.CanonicalHeaderKey). - Header http.Header - // Body is the deserialized response body. 
- Body json.RawMessage -} - -// newResponse creates a new response from the HTTP response. -func (c *Client) newResponse(res *http.Response) (*Response, error) { - r := &Response{ - StatusCode: res.StatusCode, - Header: res.Header, - } - if res.Body != nil { - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - // HEAD requests return a body but no content - if len(slurp) > 0 { - if err := c.decoder.Decode(slurp, &r.Body); err != nil { - return nil, err - } - } - } - return r, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/scan.go b/services/templeton/vendor/src/github.com/olivere/elastic/scan.go deleted file mode 100644 index 08822531b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/scan.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "errors" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -const ( - defaultKeepAlive = "5m" -) - -var ( - // End of stream (or scan) - EOS = errors.New("EOS") - - // No ScrollId - ErrNoScrollId = errors.New("no scrollId") -) - -// ScanService manages a cursor through documents in Elasticsearch. -type ScanService struct { - client *Client - indices []string - types []string - keepAlive string - searchSource *SearchSource - pretty bool - routing string - preference string - size *int -} - -// NewScanService creates a new service to iterate through the results -// of a query. -func NewScanService(client *Client) *ScanService { - builder := &ScanService{ - client: client, - searchSource: NewSearchSource().Query(NewMatchAllQuery()), - } - return builder -} - -// Index sets the name(s) of the index to use for scan. -func (s *ScanService) Index(indices ...string) *ScanService { - if s.indices == nil { - s.indices = make([]string, 0) - } - s.indices = append(s.indices, indices...) - return s -} - -// Types allows to restrict the scan to a list of types. -func (s *ScanService) Type(types ...string) *ScanService { - if s.types == nil { - s.types = make([]string, 0) - } - s.types = append(s.types, types...) - return s -} - -// Scroll is an alias for KeepAlive, the time to keep -// the cursor alive (e.g. "5m" for 5 minutes). -func (s *ScanService) Scroll(keepAlive string) *ScanService { - s.keepAlive = keepAlive - return s -} - -// KeepAlive sets the maximum time the cursor will be -// available before expiration (e.g. "5m" for 5 minutes). -func (s *ScanService) KeepAlive(keepAlive string) *ScanService { - s.keepAlive = keepAlive - return s -} - -// Fields tells Elasticsearch to only load specific fields from a search hit. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html. -func (s *ScanService) Fields(fields ...string) *ScanService { - s.searchSource = s.searchSource.Fields(fields...) - return s -} - -// SearchSource sets the search source builder to use with this service. -func (s *ScanService) SearchSource(searchSource *SearchSource) *ScanService { - s.searchSource = searchSource - if s.searchSource == nil { - s.searchSource = NewSearchSource().Query(NewMatchAllQuery()) - } - return s -} - -// Routing allows for (a comma-separated) list of specific routing values. 
-func (s *ScanService) Routing(routings ...string) *ScanService { - s.routing = strings.Join(routings, ",") - return s -} - -// Preference specifies the node or shard the operation should be -// performed on (default: "random"). -func (s *ScanService) Preference(preference string) *ScanService { - s.preference = preference - return s -} - -// Query sets the query to perform, e.g. MatchAllQuery. -func (s *ScanService) Query(query Query) *ScanService { - s.searchSource = s.searchSource.Query(query) - return s -} - -// PostFilter is executed as the last filter. It only affects the -// search hits but not facets. See -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html -// for details. -func (s *ScanService) PostFilter(postFilter Query) *ScanService { - s.searchSource = s.searchSource.PostFilter(postFilter) - return s -} - -// FetchSource indicates whether the response should contain the stored -// _source for every hit. -func (s *ScanService) FetchSource(fetchSource bool) *ScanService { - s.searchSource = s.searchSource.FetchSource(fetchSource) - return s -} - -// FetchSourceContext indicates how the _source should be fetched. -func (s *ScanService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScanService { - s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) - return s -} - -// Version can be set to true to return a version for each search hit. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html. -func (s *ScanService) Version(version bool) *ScanService { - s.searchSource = s.searchSource.Version(version) - return s -} - -// Sort the results by the given field, in the given order. -// Use the alternative SortWithInfo to use a struct to define the sorting. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html -// for detailed documentation of sorting. -func (s *ScanService) Sort(field string, ascending bool) *ScanService { - s.searchSource = s.searchSource.Sort(field, ascending) - return s -} - -// SortWithInfo defines how to sort results. -// Use the Sort func for a shortcut. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html -// for detailed documentation of sorting. -func (s *ScanService) SortWithInfo(info SortInfo) *ScanService { - s.searchSource = s.searchSource.SortWithInfo(info) - return s -} - -// SortBy defines how to sort results. -// Use the Sort func for a shortcut. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html -// for detailed documentation of sorting. -func (s *ScanService) SortBy(sorter ...Sorter) *ScanService { - s.searchSource = s.searchSource.SortBy(sorter...) - return s -} - -// Pretty enables the caller to indent the JSON output. -func (s *ScanService) Pretty(pretty bool) *ScanService { - s.pretty = pretty - return s -} - -// Size is the number of results to return per shard, not per request. -// So a size of 10 which hits 5 shards will return a maximum of 50 results -// per scan request. -func (s *ScanService) Size(size int) *ScanService { - s.size = &size - return s -} - -// Do executes the query and returns a "server-side cursor". 
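Before Do's body (which follows), here is the scan-and-scroll loop these pieces add up to, mirroring the deleted tests further down; a minimal sketch assuming "encoding/json" and a placeholder "tweets" index:

    func scanSketch(client *elastic.Client) error {
        cursor, err := client.Scan("tweets").
            Query(elastic.NewTermQuery("user", "olivere")).
            Size(100). // per shard, not per request
            Scroll("5m").
            Do()
        if err != nil {
            return err
        }
        for {
            res, err := cursor.Next()
            if err == elastic.EOS { // end of stream
                break
            }
            if err != nil {
                return err
            }
            for _, hit := range res.Hits.Hits {
                var item map[string]interface{}
                if err := json.Unmarshal(*hit.Source, &item); err != nil {
                    return err
                }
                _ = item // process the document here
            }
        }
        return nil
    }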
-func (s *ScanService) Do() (*ScanCursor, error) { - // Build url - path := "/" - - // Indices part - indexPart := make([]string, 0) - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - if len(indexPart) > 0 { - path += strings.Join(indexPart, ",") - } - - // Types - typesPart := make([]string, 0) - for _, typ := range s.types { - typ, err := uritemplates.Expand("{type}", map[string]string{ - "type": typ, - }) - if err != nil { - return nil, err - } - typesPart = append(typesPart, typ) - } - if len(typesPart) > 0 { - path += "/" + strings.Join(typesPart, ",") - } - - // Search - path += "/_search" - - // Parameters - params := make(url.Values) - if !s.searchSource.hasSort() { - // TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated. - params.Set("search_type", "scan") - } - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - if s.keepAlive != "" { - params.Set("scroll", s.keepAlive) - } else { - params.Set("scroll", defaultKeepAlive) - } - if s.size != nil && *s.size > 0 { - params.Set("size", fmt.Sprintf("%d", *s.size)) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - - // Get response - body, err := s.searchSource.Source() - if err != nil { - return nil, err - } - res, err := s.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return result - searchResult := new(SearchResult) - if err := json.Unmarshal(res.Body, searchResult); err != nil { - return nil, err - } - - cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult) - - return cursor, nil -} - -// scanCursor represents a single page of results from -// an Elasticsearch Scan operation. -type ScanCursor struct { - Results *SearchResult - - client *Client - keepAlive string - pretty bool - currentPage int -} - -// newScanCursor returns a new initialized instance -// of scanCursor. -func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor { - return &ScanCursor{ - client: client, - keepAlive: keepAlive, - pretty: pretty, - Results: searchResult, - } -} - -// TotalHits is a convenience method that returns the number -// of hits the cursor will iterate through. -func (c *ScanCursor) TotalHits() int64 { - if c.Results.Hits == nil { - return 0 - } - return c.Results.Hits.TotalHits -} - -// Next returns the next search result or nil when all -// documents have been scanned. 
-// -// Usage: -// -// for { -// res, err := cursor.Next() -// if err == elastic.EOS { -// // End of stream (or scan) -// break -// } -// if err != nil { -// // Handle error -// } -// // Work with res -// } -// -func (c *ScanCursor) Next() (*SearchResult, error) { - if c.currentPage > 0 { - if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 { - return nil, EOS - } - } - if c.Results.ScrollId == "" { - return nil, EOS - } - - // Build url - path := "/_search/scroll" - - // Parameters - params := make(url.Values) - if c.pretty { - params.Set("pretty", fmt.Sprintf("%v", c.pretty)) - } - if c.keepAlive != "" { - params.Set("scroll", c.keepAlive) - } else { - params.Set("scroll", defaultKeepAlive) - } - - // Set body - body := c.Results.ScrollId - - // Get response - res, err := c.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return result - c.Results = &SearchResult{ScrollId: body} - if err := json.Unmarshal(res.Body, c.Results); err != nil { - return nil, err - } - - c.currentPage += 1 - - return c.Results, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/scan_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/scan_test.go deleted file mode 100644 index b2a8f0ef9..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/scan_test.go +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - _ "net/http" - "testing" -) - -func TestScan(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - cursor, err := client.Scan(testIndexName).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - if cursor.Results == nil { - t.Errorf("expected results != nil; got nil") - } - if cursor.Results.Hits == nil { - t.Errorf("expected results.Hits != nil; got nil") - } - if cursor.Results.Hits.TotalHits != 3 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) - } - if len(cursor.Results.Hits.Hits) != 0 { - t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) - } - - pages := 0 - numDocs := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - 
numDocs += 1 - } - } - - if pages <= 0 { - t.Errorf("expected to retrieve at least 1 page; got %d", pages) - } - - if numDocs != 3 { - t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs) - } -} - -func TestScanWithSort(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // We sort on a numerical field, because sorting on the 'message' string field would - // raise the whole question of tokenizing and analyzing. - cursor, err := client.Scan(testIndexName).Sort("retweets", true).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - if cursor.Results == nil { - t.Errorf("expected results != nil; got nil") - } - if cursor.Results.Hits == nil { - t.Errorf("expected results.Hits != nil; got nil") - } - if cursor.Results.Hits.TotalHits != 3 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits) - } - if len(cursor.Results.Hits.Hits) != 1 { - t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 1, len(cursor.Results.Hits.Hits)) - } - - if cursor.Results.Hits.Hits[0].Id != "3" { - t.Errorf("expected hitID = %v; got %v", "3", cursor.Results.Hits.Hits[0].Id) - } - - numDocs := 1 // The cursor already gave us a result - pages := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - numDocs += 1 - } - } - - if pages <= 0 { - t.Errorf("expected to retrieve at least 1 page; got %d", pages) - } - - if numDocs != 3 { - t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs) - } -} - -func TestScanWithSortByDoc(t *testing.T) { - // Sorting by doc is introduced in Elasticsearch 2.1, - // and replaces the deprecated search_type=scan. 
- // See https://www.elastic.co/guide/en/elasticsearch/reference/2.x/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated - client := setupTestClientAndCreateIndex(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if esversion < "2.1" { - t.Skipf(`Elasticsearch %s does not have {"sort":["_doc"]}`, esversion) - return - } - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - comment1 := comment{User: "nico", Comment: "You bet."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - cursor, err := client.Scan(testIndexName).Sort("_doc", true).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - numDocs := 0 - pages := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for range searchResult.Hits.Hits { - numDocs += 1 - } - } - - if pages != 3 { - t.Errorf("expected to retrieve %d pages; got %d", 2, pages) - } - if numDocs != 2 { - t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs) - } -} - -func TestScanWithSearchSource(t *testing.T) { - //client := setupTestClientAndCreateIndexAndLog(t) - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - src := NewSearchSource(). - Query(NewTermQuery("user", "olivere")). 
- FetchSourceContext(NewFetchSourceContext(true).Include("retweets")) - cursor, err := client.Scan(testIndexName).SearchSource(src).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - if cursor.Results == nil { - t.Fatalf("expected results != nil; got nil") - } - if cursor.Results.Hits == nil { - t.Fatalf("expected results.Hits != nil; got nil") - } - if cursor.Results.Hits.TotalHits != 2 { - t.Fatalf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits) - } - - numDocs := 0 - pages := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - if _, found := item["message"]; found { - t.Fatalf("expected to not see field %q; got: %#v", "message", item) - } - numDocs += 1 - } - } - - if pages != 3 { - t.Errorf("expected to retrieve %d pages; got %d", 2, pages) - } - if numDocs != 2 { - t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs) - } -} - -func TestScanWithQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Return tweets from olivere only - termQuery := NewTermQuery("user", "olivere") - cursor, err := client.Scan(testIndexName). - Size(1). - Query(termQuery). 
- Do() - if err != nil { - t.Fatal(err) - } - - if cursor.Results == nil { - t.Errorf("expected results != nil; got nil") - } - if cursor.Results.Hits == nil { - t.Errorf("expected results.Hits != nil; got nil") - } - if cursor.Results.Hits.TotalHits != 2 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits) - } - if len(cursor.Results.Hits.Hits) != 0 { - t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits)) - } - - pages := 0 - numDocs := 0 - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - numDocs += 1 - } - } - - if pages <= 0 { - t.Errorf("expected to retrieve at least 1 page; got %d", pages) - } - - if numDocs != 2 { - t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs) - } -} - -func TestScanAndScrollWithMissingIndex(t *testing.T) { - client := setupTestClient(t) // does not create testIndexName - - cursor, err := client.Scan(testIndexName).Scroll("30s").Do() - if err == nil { - t.Fatalf("expected error != nil; got: %v", err) - } - if cursor != nil { - t.Fatalf("expected cursor == nil; got: %v", cursor) - } -} - -func TestScanAndScrollWithEmptyIndex(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - if isTravis() { - t.Skip("test on Travis failes regularly with " + - "Error 503 (Service Unavailable): SearchPhaseExecutionException[Failed to execute phase [init_scan], all shards failed]") - } - - _, err := client.Flush().Index(testIndexName).WaitIfOngoing(true).Do() - if err != nil { - t.Fatal(err) - } - - cursor, err := client.Scan(testIndexName).Scroll("30s").Do() - if err != nil { - t.Fatal(err) - } - if cursor == nil { - t.Fatalf("expected cursor; got: %v", cursor) - } - - // First request returns no error, but no hits - res, err := cursor.Next() - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatalf("expected results != nil; got: nil") - } - if res.ScrollId == "" { - t.Errorf("expected scrollId in results; got: %q", res.ScrollId) - } - if res.TotalHits() != 0 { - t.Errorf("expected TotalHits() = %d; got %d", 0, res.TotalHits()) - } - if res.Hits == nil { - t.Errorf("expected results.Hits != nil; got: nil") - } - if res.Hits.TotalHits != 0 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 0, res.Hits.TotalHits) - } - if res.Hits.Hits == nil { - t.Errorf("expected results.Hits.Hits != nil; got: %v", res.Hits.Hits) - } - if len(res.Hits.Hits) != 0 { - t.Errorf("expected len(results.Hits.Hits) == %d; got: %d", 0, len(res.Hits.Hits)) - } - - // Subsequent requests return EOS - res, err = cursor.Next() - if err != EOS { - t.Fatal(err) - } - if res != nil { - t.Fatalf("expected results == %v; got: %v", nil, res) - } - - res, err = cursor.Next() - if err != EOS { - t.Fatal(err) - } - if res != nil { - t.Fatalf("expected results == %v; got: %v", nil, res) - } -} - -func TestScanIssue119(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - comment1 := comment{User: "nico", Comment: "You bet."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - - _, err := 
client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("1").BodyJson(&comment1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - cursor, err := client.Scan(testIndexName).Fields("_source", "_parent").Size(1).Do() - if err != nil { - t.Fatal(err) - } - - for { - searchResult, err := cursor.Next() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - for _, hit := range searchResult.Hits.Hits { - if hit.Type == "tweet" { - if _, ok := hit.Fields["_parent"].(string); ok { - t.Errorf("Type `tweet` cannot have any parent...") - - toPrint, _ := json.MarshalIndent(hit, "", " ") - t.Fatal(string(toPrint)) - } - } - - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/script.go b/services/templeton/vendor/src/github.com/olivere/elastic/script.go deleted file mode 100644 index a5c9e45e2..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/script.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// Script holds all the paramaters necessary to compile or find in cache -// and then execute a script. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html -// for details of scripting. -type Script struct { - script string - typ string - lang string - params map[string]interface{} -} - -// NewScript creates and initializes a new Script. -func NewScript(script string) *Script { - return &Script{ - script: script, - typ: "", // default type is "inline" - params: make(map[string]interface{}), - } -} - -// NewScriptInline creates and initializes a new Script of type "inline". -func NewScriptInline(script string) *Script { - return NewScript(script).Type("inline") -} - -// NewScriptId creates and initializes a new Script of type "id". -func NewScriptId(script string) *Script { - return NewScript(script).Type("id") -} - -// NewScriptFile creates and initializes a new Script of type "file". -func NewScriptFile(script string) *Script { - return NewScript(script).Type("file") -} - -// Script is either the cache key of the script to be compiled/executed -// or the actual script source code for inline scripts. For indexed -// scripts this is the id used in the request. For file scripts this is -// the file name. -func (s *Script) Script(script string) *Script { - s.script = script - return s -} - -// Type sets the type of script: "inline", "id", or "file". -func (s *Script) Type(typ string) *Script { - s.typ = typ - return s -} - -// Lang sets the language of the script. Permitted values are "groovy", -// "expression", "mustache", "mvel" (default), "javascript", "python". -// To use certain languages, you need to configure your server and/or -// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html -// for details. 
-func (s *Script) Lang(lang string) *Script { - s.lang = lang - return s -} - -// Param adds a key/value pair to the parameters that this script will be executed with. -func (s *Script) Param(name string, value interface{}) *Script { - if s.params == nil { - s.params = make(map[string]interface{}) - } - s.params[name] = value - return s -} - -// Params sets the map of parameters this script will be executed with. -func (s *Script) Params(params map[string]interface{}) *Script { - s.params = params - return s -} - -// Source returns the JSON serializable data for this Script. -func (s *Script) Source() (interface{}, error) { - if s.typ == "" && s.lang == "" && len(s.params) == 0 { - return s.script, nil - } - source := make(map[string]interface{}) - if s.typ == "" { - source["inline"] = s.script - } else { - source[s.typ] = s.script - } - if s.lang != "" { - source["lang"] = s.lang - } - if len(s.params) > 0 { - source["params"] = s.params - } - return source, nil -} - -// -- Script Field -- - -// ScriptField is a single script field. -type ScriptField struct { - FieldName string // name of the field - - script *Script -} - -// NewScriptField creates and initializes a new ScriptField. -func NewScriptField(fieldName string, script *Script) *ScriptField { - return &ScriptField{FieldName: fieldName, script: script} -} - -// Source returns the serializable JSON for the ScriptField. -func (f *ScriptField) Source() (interface{}, error) { - if f.script == nil { - return nil, errors.New("ScriptField expects script") - } - source := make(map[string]interface{}) - src, err := f.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/script_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/script_test.go deleted file mode 100644 index 552d92a02..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/script_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
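The tests that follow pin down the exact JSON each Script variant serializes to; for orientation, a minimal sketch of building one (the field and parameter names are placeholders):

    func scriptSketch() (interface{}, error) {
        s := elastic.NewScriptInline("doc['retweets'].value * factor").
            Param("factor", 2.0).
            Lang("groovy")
        // Source() returns the JSON-serializable form, roughly
        // {"inline":"doc['retweets'].value * factor","lang":"groovy","params":{"factor":2}}.
        return s.Source()
    }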
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestScriptingDefault(t *testing.T) { - builder := NewScript("doc['field'].value * 2") - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `"doc['field'].value * 2"` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestScriptingInline(t *testing.T) { - builder := NewScriptInline("doc['field'].value * factor").Param("factor", 2.0) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"inline":"doc['field'].value * factor","params":{"factor":2}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestScriptingId(t *testing.T) { - builder := NewScriptId("script-with-id").Param("factor", 2.0) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"id":"script-with-id","params":{"factor":2}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestScriptingFile(t *testing.T) { - builder := NewScriptFile("script-file").Param("factor", 2.0).Lang("groovy") - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"file":"script-file","lang":"groovy","params":{"factor":2}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/scroll.go b/services/templeton/vendor/src/github.com/olivere/elastic/scroll.go deleted file mode 100644 index 1cab35c36..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/scroll.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// ScrollService manages a cursor through documents in Elasticsearch. -type ScrollService struct { - client *Client - indices []string - types []string - keepAlive string - query Query - size *int - pretty bool - scrollId string -} - -func NewScrollService(client *Client) *ScrollService { - builder := &ScrollService{ - client: client, - query: NewMatchAllQuery(), - } - return builder -} - -func (s *ScrollService) Index(indices ...string) *ScrollService { - if s.indices == nil { - s.indices = make([]string, 0) - } - s.indices = append(s.indices, indices...) - return s -} - -func (s *ScrollService) Type(types ...string) *ScrollService { - if s.types == nil { - s.types = make([]string, 0) - } - s.types = append(s.types, types...) - return s -} - -// Scroll is an alias for KeepAlive, the time to keep -// the cursor alive (e.g. "5m" for 5 minutes). -func (s *ScrollService) Scroll(keepAlive string) *ScrollService { - s.keepAlive = keepAlive - return s -} - -// KeepAlive sets the maximum time the cursor will be -// available before expiration (e.g. 
"5m" for 5 minutes). -func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService { - s.keepAlive = keepAlive - return s -} - -func (s *ScrollService) Query(query Query) *ScrollService { - s.query = query - return s -} - -func (s *ScrollService) Pretty(pretty bool) *ScrollService { - s.pretty = pretty - return s -} - -func (s *ScrollService) Size(size int) *ScrollService { - s.size = &size - return s -} - -func (s *ScrollService) ScrollId(scrollId string) *ScrollService { - s.scrollId = scrollId - return s -} - -func (s *ScrollService) Do() (*SearchResult, error) { - if s.scrollId == "" { - return s.GetFirstPage() - } - return s.GetNextPage() -} - -func (s *ScrollService) GetFirstPage() (*SearchResult, error) { - // Build url - path := "/" - - // Indices part - indexPart := make([]string, 0) - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - if len(indexPart) > 0 { - path += strings.Join(indexPart, ",") - } - - // Types - typesPart := make([]string, 0) - for _, typ := range s.types { - typ, err := uritemplates.Expand("{type}", map[string]string{ - "type": typ, - }) - if err != nil { - return nil, err - } - typesPart = append(typesPart, typ) - } - if len(typesPart) > 0 { - path += "/" + strings.Join(typesPart, ",") - } - - // Search - path += "/_search" - - // Parameters - params := make(url.Values) - // TODO: ES 2.1 deprecates search_type=scan. See https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking_21_search_changes.html#_literal_search_type_scan_literal_deprecated. - params.Set("search_type", "scan") - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - if s.keepAlive != "" { - params.Set("scroll", s.keepAlive) - } else { - params.Set("scroll", defaultKeepAlive) - } - if s.size != nil && *s.size > 0 { - params.Set("size", fmt.Sprintf("%d", *s.size)) - } - - // Set body - body := make(map[string]interface{}) - if s.query != nil { - src, err := s.query.Source() - if err != nil { - return nil, err - } - body["query"] = src - } - - // Get response - res, err := s.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return result - searchResult := new(SearchResult) - if err := json.Unmarshal(res.Body, searchResult); err != nil { - return nil, err - } - - return searchResult, nil -} - -func (s *ScrollService) GetNextPage() (*SearchResult, error) { - if s.scrollId == "" { - return nil, EOS - } - - // Build url - path := "/_search/scroll" - - // Parameters - params := make(url.Values) - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - if s.keepAlive != "" { - params.Set("scroll", s.keepAlive) - } else { - params.Set("scroll", defaultKeepAlive) - } - - // Get response - res, err := s.client.PerformRequest("POST", path, params, s.scrollId) - if err != nil { - return nil, err - } - - // Return result - searchResult := new(SearchResult) - if err := json.Unmarshal(res.Body, searchResult); err != nil { - return nil, err - } - - // Determine last page - if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 { - return nil, EOS - } - - return searchResult, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/scroll_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/scroll_test.go deleted file mode 100644 index 
4a5c48111..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/scroll_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - _ "net/http" - "testing" -) - -func TestScroll(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - res, err := client.Scroll(testIndexName).Size(1).Do() - if err != nil { - t.Fatal(err) - } - - if res == nil { - t.Errorf("expected results != nil; got nil") - } - if res.Hits == nil { - t.Errorf("expected results.Hits != nil; got nil") - } - if res.Hits.TotalHits != 3 { - t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, res.Hits.TotalHits) - } - if len(res.Hits.Hits) != 0 { - t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(res.Hits.Hits)) - } - if res.ScrollId == "" { - t.Errorf("expected scrollId in results; got %q", res.ScrollId) - } - - pages := 0 - numDocs := 0 - scrollId := res.ScrollId - - for { - searchResult, err := client.Scroll(testIndexName). - Size(1). - ScrollId(scrollId). - Do() - if err == EOS { - break - } - if err != nil { - t.Fatal(err) - } - - pages += 1 - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - numDocs += 1 - } - - scrollId = searchResult.ScrollId - if scrollId == "" { - t.Errorf("expected scrollId in results; got %q", scrollId) - } - } - - if pages <= 0 { - t.Errorf("expected to retrieve at least 1 page; got %d", pages) - } - - if numDocs != 3 { - t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search.go b/services/templeton/vendor/src/github.com/olivere/elastic/search.go deleted file mode 100644 index 4811ee1ed..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "reflect" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" ) - -// Search for documents in Elasticsearch.
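The scan cursor that TestScroll exercises above reduces to a simple protocol: the first Do() call (no scroll id yet) opens the cursor, each later call passes the previous response's scroll id, and the package's EOS sentinel error marks the end of the stream. A condensed sketch under those assumptions; the index name and page size are illustrative:

import (
	"log"

	"gopkg.in/olivere/elastic.v3"
)

// scanAll pages through every document of an index using the cursor
// protocol above. Note the first page carries no hits under
// search_type=scan, which the loop handles naturally.
func scanAll(client *elastic.Client) error {
	res, err := client.Scroll("tweets").Size(100).Do() // opens the cursor
	for err == nil {
		for _, hit := range res.Hits.Hits {
			log.Println(hit.Id) // process one page of hits
		}
		res, err = client.Scroll("tweets").ScrollId(res.ScrollId).Do()
	}
	if err == elastic.EOS {
		return nil // cursor exhausted
	}
	return err
}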
-type SearchService struct { - client *Client - searchSource *SearchSource - source interface{} - pretty bool - searchType string - indices []string - routing string - preference string - types []string -} - -// NewSearchService creates a new service for searching in Elasticsearch. -func NewSearchService(client *Client) *SearchService { - builder := &SearchService{ - client: client, - searchSource: NewSearchSource(), - } - return builder -} - -// SearchSource sets the search source builder to use with this service. -func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService { - s.searchSource = searchSource - if s.searchSource == nil { - s.searchSource = NewSearchSource() - } - return s -} - -// Source allows the user to set the request body manually without using -// any of the structs and interfaces in Elastic. -func (s *SearchService) Source(source interface{}) *SearchService { - s.source = source - return s -} - -// Index sets the names of the indices to use for search. -func (s *SearchService) Index(indices ...string) *SearchService { - if s.indices == nil { - s.indices = make([]string, 0) - } - s.indices = append(s.indices, indices...) - return s -} - -// Type allows to restrict the search to a list of types. -func (s *SearchService) Type(types ...string) *SearchService { - if s.types == nil { - s.types = make([]string, 0) - } - s.types = append(s.types, types...) - return s -} - -// Pretty enables the caller to indent the JSON output. -func (s *SearchService) Pretty(pretty bool) *SearchService { - s.pretty = pretty - return s -} - -// Timeout sets the timeout to use, e.g. "1s" or "1000ms". -func (s *SearchService) Timeout(timeout string) *SearchService { - s.searchSource = s.searchSource.Timeout(timeout) - return s -} - -// TimeoutInMillis sets the timeout in milliseconds. -func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService { - s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis) - return s -} - -// SearchType sets the search operation type. Valid values are: -// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", -// "dfs_query_and_fetch", "count", "scan". -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-type.html -// for details. -func (s *SearchService) SearchType(searchType string) *SearchService { - s.searchType = searchType - return s -} - -// Routing is a list of specific routing values to control the shards -// the search will be executed on. -func (s *SearchService) Routing(routings ...string) *SearchService { - s.routing = strings.Join(routings, ",") - return s -} - -// Preference sets the preference to execute the search. Defaults to -// randomize across shards. Can be set to "_local" to prefer local shards, -// "_primary" to execute on primary shards only, or a custom value which -// guarantees that the same order will be used across different requests. -func (s *SearchService) Preference(preference string) *SearchService { - s.preference = preference - return s -} - -// Query sets the query to perform, e.g. MatchAllQuery. -func (s *SearchService) Query(query Query) *SearchService { - s.searchSource = s.searchSource.Query(query) - return s -} - -// PostFilter will be executed after the query has been executed and -// only affects the search hits, not the aggregations. -// This filter is always executed as the last filtering mechanism. 
-func (s *SearchService) PostFilter(postFilter Query) *SearchService { - s.searchSource = s.searchSource.PostFilter(postFilter) - return s -} - -// FetchSource indicates whether the response should contain the stored -// _source for every hit. -func (s *SearchService) FetchSource(fetchSource bool) *SearchService { - s.searchSource = s.searchSource.FetchSource(fetchSource) - return s -} - -// FetchSourceContext indicates how the _source should be fetched. -func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService { - s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext) - return s -} - -// Highlight adds highlighting to the search. -func (s *SearchService) Highlight(highlight *Highlight) *SearchService { - s.searchSource = s.searchSource.Highlight(highlight) - return s -} - -// GlobalSuggestText defines the global text to use with all suggesters. -// This avoids repetition. -func (s *SearchService) GlobalSuggestText(globalText string) *SearchService { - s.searchSource = s.searchSource.GlobalSuggestText(globalText) - return s -} - -// Suggester adds a suggester to the search. -func (s *SearchService) Suggester(suggester Suggester) *SearchService { - s.searchSource = s.searchSource.Suggester(suggester) - return s -} - -// Aggregation adds an aggregation to perform as part of the search. -func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService { - s.searchSource = s.searchSource.Aggregation(name, aggregation) - return s -} - -// MinScore sets the minimum score below which docs will be filtered out. -func (s *SearchService) MinScore(minScore float64) *SearchService { - s.searchSource = s.searchSource.MinScore(minScore) - return s -} - -// From index to start the search from. Defaults to 0. -func (s *SearchService) From(from int) *SearchService { - s.searchSource = s.searchSource.From(from) - return s -} - -// Size is the number of search hits to return. Defaults to 10. -func (s *SearchService) Size(size int) *SearchService { - s.searchSource = s.searchSource.Size(size) - return s -} - -// Explain indicates whether each search hit should be returned with -// an explanation of the hit (ranking). -func (s *SearchService) Explain(explain bool) *SearchService { - s.searchSource = s.searchSource.Explain(explain) - return s -} - -// Version indicates whether each search hit should be returned with -// a version associated to it. -func (s *SearchService) Version(version bool) *SearchService { - s.searchSource = s.searchSource.Version(version) - return s -} - -// Sort adds a sort order. -func (s *SearchService) Sort(field string, ascending bool) *SearchService { - s.searchSource = s.searchSource.Sort(field, ascending) - return s -} - -// SortWithInfo adds a sort order. -func (s *SearchService) SortWithInfo(info SortInfo) *SearchService { - s.searchSource = s.searchSource.SortWithInfo(info) - return s -} - -// SortBy adds a sort order. -func (s *SearchService) SortBy(sorter ...Sorter) *SearchService { - s.searchSource = s.searchSource.SortBy(sorter...) - return s -} - -// NoFields indicates that no fields should be loaded, resulting in only -// id and type to be returned per field. -func (s *SearchService) NoFields() *SearchService { - s.searchSource = s.searchSource.NoFields() - return s -} - -// Field adds a single field to load and return (note, must be stored) as -// part of the search request. If none are specified, the source of the -// document will be returned.
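Taken together, the setters above form the usual fluent chain. A short sketch of a typical call; the index, type, and field names are illustrative, and client.Search() and NewTermQuery are assumed from elsewhere in the package:

import "gopkg.in/olivere/elastic.v3"

// findTweets composes the fluent setters into one request: restrict to an
// index and type, filter by a term, sort, and page the results.
func findTweets(client *elastic.Client) (*elastic.SearchResult, error) {
	return client.Search().
		Index("tweets").
		Type("tweet").
		Query(elastic.NewTermQuery("user", "olivere")).
		Sort("created", true). // ascending by creation date
		From(0).Size(10).
		Do()
}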
-func (s *SearchService) Field(fieldName string) *SearchService { - s.searchSource = s.searchSource.Field(fieldName) - return s -} - -// Fields sets the fields to load and return as part of the search request. -// If none are specified, the source of the document will be returned. -func (s *SearchService) Fields(fields ...string) *SearchService { - s.searchSource = s.searchSource.Fields(fields...) - return s -} - -// Do executes the search and returns a SearchResult. -func (s *SearchService) Do() (*SearchResult, error) { - // Build url - path := "/" - - // Indices part - indexPart := make([]string, 0) - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - path += strings.Join(indexPart, ",") - - // Types part - if len(s.types) > 0 { - typesPart := make([]string, 0) - for _, typ := range s.types { - typ, err := uritemplates.Expand("{type}", map[string]string{ - "type": typ, - }) - if err != nil { - return nil, err - } - typesPart = append(typesPart, typ) - } - path += "/" - path += strings.Join(typesPart, ",") - } - - // Search - path += "/_search" - - // Parameters - params := make(url.Values) - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - if s.searchType != "" { - params.Set("search_type", s.searchType) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - - // Perform request - var body interface{} - if s.source != nil { - body = s.source - } else { - src, err := s.searchSource.Source() - if err != nil { - return nil, err - } - body = src - } - res, err := s.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return search results - ret := new(SearchResult) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// SearchResult is the result of a search in Elasticsearch. -type SearchResult struct { - TookInMillis int64 `json:"took"` // search time in milliseconds - ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations - Hits *SearchHits `json:"hits"` // the actual search hits - Suggest SearchSuggest `json:"suggest"` // results from suggesters - Aggregations Aggregations `json:"aggregations"` // results from aggregations - TimedOut bool `json:"timed_out"` // true if the search timed out - //Error string `json:"error,omitempty"` // used in MultiSearch only - // TODO double-check that MultiGet now returns details error information - Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet -} - -// TotalHits is a convenience function to return the number of hits for -// a search result. -func (r *SearchResult) TotalHits() int64 { - if r.Hits != nil { - return r.Hits.TotalHits - } - return 0 -} - -// Each is a utility function to iterate over all hits. It saves you from -// checking for nil values. Notice that Each will ignore errors in -// serializing JSON. -func (r *SearchResult) Each(typ reflect.Type) []interface{} { - if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 { - return nil - } - slice := make([]interface{}, 0) - for _, hit := range r.Hits.Hits { - v := reflect.New(typ).Elem() - if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil { - slice = append(slice, v.Interface()) - } - } - return slice -} - -// SearchHits specifies the list of search hits. 
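The reflection-based Each helper above is the convenient way to get typed hits back; note that it silently skips hits whose _source fails to unmarshal. A small sketch, with an illustrative document type:

import (
	"fmt"
	"reflect"

	"gopkg.in/olivere/elastic.v3"
)

// Tweet mirrors the documents' _source; field names are illustrative.
type Tweet struct {
	User    string `json:"user"`
	Message string `json:"message"`
}

// printTweets unmarshals every hit into Tweet via the Each helper above.
func printTweets(res *elastic.SearchResult) {
	for _, item := range res.Each(reflect.TypeOf(Tweet{})) {
		if t, ok := item.(Tweet); ok {
			fmt.Println(t.User, ":", t.Message)
		}
	}
}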
-type SearchHits struct { - TotalHits int64 `json:"total"` // total number of hits found - MaxScore *float64 `json:"max_score"` // maximum score of all hits - Hits []*SearchHit `json:"hits"` // the actual hits returned -} - -// SearchHit is a single hit. -type SearchHit struct { - Score *float64 `json:"_score"` // computed score - Index string `json:"_index"` // index name - Type string `json:"_type"` // type meta field - Id string `json:"_id"` // external or internal - Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) - Timestamp int64 `json:"_timestamp"` // timestamp meta field - TTL int64 `json:"_ttl"` // ttl meta field - Routing string `json:"_routing"` // routing meta field - Parent string `json:"_parent"` // parent meta field - Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService - Sort []interface{} `json:"sort"` // sort information - Highlight SearchHitHighlight `json:"highlight"` // highlighter information - Source *json.RawMessage `json:"_source"` // stored document source - Fields map[string]interface{} `json:"fields"` // returned fields - Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed - MatchedQueries []string `json:"matched_queries"` // matched queries - InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0 - - // Shard - // HighlightFields - // SortValues - // MatchedFilters -} - -type SearchHitInnerHits struct { - Hits *SearchHits `json:"hits"` -} - -// SearchExplanation explains how the score for a hit was computed. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html. -type SearchExplanation struct { - Value float64 `json:"value"` // e.g. 1.0 - Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:" - Details []SearchExplanation `json:"details,omitempty"` // recursive details -} - -// Suggest - -// SearchSuggest is a map of suggestions. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. -type SearchSuggest map[string][]SearchSuggestion - -// SearchSuggestion is a single search suggestion. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. -type SearchSuggestion struct { - Text string `json:"text"` - Offset int `json:"offset"` - Length int `json:"length"` - Options []SearchSuggestionOption `json:"options"` -} - -// SearchSuggestionOption is an option of a SearchSuggestion. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html. -type SearchSuggestionOption struct { - Text string `json:"text"` - Score float64 `json:"score"` - Freq int `json:"freq"` - Payload interface{} `json:"payload"` -} - -// Aggregations (see search_aggs.go) - -// Highlighting - -// SearchHitHighlight is the highlight information of a search hit. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html -// for a general discussion of highlighting. -type SearchHitHighlight map[string][]string diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs.go deleted file mode 100644 index 8e13a539a..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs.go +++ /dev/null @@ -1,1270 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "bytes" - "encoding/json" -) - -// Aggregations can be seen as a unit-of-work that build -// analytic information over a set of documents. It is -// (in many senses) the follow-up of facets in Elasticsearch. -// For more details about aggregations, visit: -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html -type Aggregation interface { - // Source returns a JSON-serializable aggregation that is a fragment - // of the request sent to Elasticsearch. - Source() (interface{}, error) -} - -// Aggregations is a list of aggregations that are part of a search result. -type Aggregations map[string]*json.RawMessage - -// Min returns min aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html -func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Max returns max aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html -func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Sum returns sum aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html -func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Avg returns average aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html -func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// ValueCount returns value-count aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html -func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Cardinality returns cardinality aggregation results. 
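All the single-value accessors above (Min, Max, Sum, Avg, ValueCount, Cardinality) follow one pattern: look up the named raw message, decode it lazily, and report whether it was present. A sketch of requesting and reading one such metric; the names are illustrative, and NewAvgAggregation is assumed from the package's aggregation builders:

import (
	"fmt"

	"gopkg.in/olivere/elastic.v3"
)

// averageRetweets requests a single metric and reads it back through the
// Avg accessor defined above.
func averageRetweets(client *elastic.Client) (float64, error) {
	res, err := client.Search().
		Index("tweets").
		Aggregation("avg_retweets", elastic.NewAvgAggregation().Field("retweets")).
		Size(0). // no hits needed, only the aggregation
		Do()
	if err != nil {
		return 0, err
	}
	if avg, found := res.Aggregations.Avg("avg_retweets"); found && avg.Value != nil {
		return *avg.Value, nil
	}
	return 0, fmt.Errorf("avg_retweets missing from response")
}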
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html -func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Stats returns stats aggregation results. -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html -func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationStatsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// ExtendedStats returns extended stats aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html -func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationExtendedStatsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Percentiles returns percentiles results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html -func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPercentilesMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// PercentileRanks returns percentile ranks results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html -func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPercentilesMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// TopHits returns top-hits aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html -func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationTopHitsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Global returns global results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html -func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Filter returns filter results. 
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html -func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Filters returns filters results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html -func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketFilters) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Missing returns missing results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html -func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Nested returns nested results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html -func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// ReverseNested returns reverse-nested results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html -func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Children returns children results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html -func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Terms returns terms aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html -func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketKeyItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// SignificantTerms returns significant terms aggregation results. 
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html -func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketSignificantTerms) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Sampler returns sampler aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-sampler-aggregation.html -func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Range returns range aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html -func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// KeyedRange returns keyed range aggregation results. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html. -func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketKeyedRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// DateRange returns date range aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html -func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// IPv4Range returns IPv4 range aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html -func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Histogram returns histogram aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html -func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketHistogramItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// DateHistogram returns date histogram aggregation results. 
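Bucket accessors such as Terms return the bucket list defined further below in this file (AggregationBucketKeyItems), so reading the result is a plain loop. A sketch with an illustrative aggregation name:

import (
	"fmt"

	"gopkg.in/olivere/elastic.v3"
)

// topUsers walks the buckets of a terms aggregation via the Terms
// accessor above.
func topUsers(res *elastic.SearchResult) {
	if terms, found := res.Aggregations.Terms("by_user"); found {
		for _, b := range terms.Buckets {
			fmt.Printf("%v: %d docs\n", b.Key, b.DocCount)
		}
	}
}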
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html -func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketHistogramItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// GeoBounds returns geo-bounds aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html -func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationGeoBoundsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// GeoHash returns geo-hash aggregation results. -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html -func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketKeyItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// GeoDistance returns geo distance aggregation results. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html -func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// AvgBucket returns average bucket pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html -func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// SumBucket returns sum bucket pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html -func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// MaxBucket returns maximum bucket pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html -func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineBucketMetricValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// MinBucket returns minimum bucket pipeline aggregation results. 
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html -func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineBucketMetricValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// MovAvg returns moving average pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html -func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Derivative returns derivative pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html -func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineDerivative) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// CumulativeSum returns a cumulative sum pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html -func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// BucketScript returns bucket script pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html -func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// SerialDiff returns serial differencing pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html -func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(*raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// -- Single value metric -- - -// AggregationValueMetric is a single-value metric, returned e.g. by a -// Min or Max aggregation. -type AggregationValueMetric struct { - Aggregations - - Value *float64 //`json:"value"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure. 
-func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["value"]; ok && v != nil { - json.Unmarshal(*v, &a.Value) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Stats metric -- - -// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation. -type AggregationStatsMetric struct { - Aggregations - - Count int64 // `json:"count"` - Min *float64 //`json:"min,omitempty"` - Max *float64 //`json:"max,omitempty"` - Avg *float64 //`json:"avg,omitempty"` - Sum *float64 //`json:"sum,omitempty"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure. -func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["count"]; ok && v != nil { - json.Unmarshal(*v, &a.Count) - } - if v, ok := aggs["min"]; ok && v != nil { - json.Unmarshal(*v, &a.Min) - } - if v, ok := aggs["max"]; ok && v != nil { - json.Unmarshal(*v, &a.Max) - } - if v, ok := aggs["avg"]; ok && v != nil { - json.Unmarshal(*v, &a.Avg) - } - if v, ok := aggs["sum"]; ok && v != nil { - json.Unmarshal(*v, &a.Sum) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Extended stats metric -- - -// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation. -type AggregationExtendedStatsMetric struct { - Aggregations - - Count int64 // `json:"count"` - Min *float64 //`json:"min,omitempty"` - Max *float64 //`json:"max,omitempty"` - Avg *float64 //`json:"avg,omitempty"` - Sum *float64 //`json:"sum,omitempty"` - SumOfSquares *float64 //`json:"sum_of_squares,omitempty"` - Variance *float64 //`json:"variance,omitempty"` - StdDeviation *float64 //`json:"std_deviation,omitempty"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure. -func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["count"]; ok && v != nil { - json.Unmarshal(*v, &a.Count) - } - if v, ok := aggs["min"]; ok && v != nil { - json.Unmarshal(*v, &a.Min) - } - if v, ok := aggs["max"]; ok && v != nil { - json.Unmarshal(*v, &a.Max) - } - if v, ok := aggs["avg"]; ok && v != nil { - json.Unmarshal(*v, &a.Avg) - } - if v, ok := aggs["sum"]; ok && v != nil { - json.Unmarshal(*v, &a.Sum) - } - if v, ok := aggs["sum_of_squares"]; ok && v != nil { - json.Unmarshal(*v, &a.SumOfSquares) - } - if v, ok := aggs["variance"]; ok && v != nil { - json.Unmarshal(*v, &a.Variance) - } - if v, ok := aggs["std_deviation"]; ok && v != nil { - json.Unmarshal(*v, &a.StdDeviation) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Percentiles metric -- - -// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation. 
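Note that every UnmarshalJSON above stores the raw map back into the embedded Aggregations. That is what makes sub-aggregations work: a terms bucket can itself be asked for the metrics computed inside it. A sketch, with illustrative names:

import (
	"fmt"

	"gopkg.in/olivere/elastic.v3"
)

// avgPerUser reads a metric nested inside each terms bucket; the bucket
// embeds Aggregations, so the typed accessors work per bucket.
func avgPerUser(res *elastic.SearchResult) {
	if terms, found := res.Aggregations.Terms("by_user"); found {
		for _, b := range terms.Buckets {
			if avg, ok := b.Avg("avg_retweets"); ok && avg.Value != nil {
				fmt.Printf("%v: %.1f\n", b.Key, *avg.Value)
			}
		}
	}
}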
-type AggregationPercentilesMetric struct { - Aggregations - - Values map[string]float64 // `json:"values"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure. -func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["values"]; ok && v != nil { - json.Unmarshal(*v, &a.Values) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Top-hits metric -- - -// AggregationTopHitsMetric is a metric returned by a TopHits aggregation. -type AggregationTopHitsMetric struct { - Aggregations - - Hits *SearchHits //`json:"hits"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure. -func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - a.Aggregations = aggs - a.Hits = new(SearchHits) - if v, ok := aggs["hits"]; ok && v != nil { - json.Unmarshal(*v, &a.Hits) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - return nil -} - -// -- Geo-bounds metric -- - -// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation. -type AggregationGeoBoundsMetric struct { - Aggregations - - Bounds struct { - TopLeft struct { - Latitude float64 `json:"lat"` - Longitude float64 `json:"lon"` - } `json:"top_left"` - BottomRight struct { - Latitude float64 `json:"lat"` - Longitude float64 `json:"lon"` - } `json:"bottom_right"` - } `json:"bounds"` - - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure. -func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["bounds"]; ok && v != nil { - json.Unmarshal(*v, &a.Bounds) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Single bucket -- - -// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global. -type AggregationSingleBucket struct { - Aggregations - - DocCount int64 // `json:"doc_count"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure. -func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCount) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket range items -- - -// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned -// with a range aggregation. 
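The percentiles metric above comes back as a map keyed by the percentile rendered as a string, so reading it is a map iteration. A sketch with an illustrative aggregation name:

import (
	"fmt"

	"gopkg.in/olivere/elastic.v3"
)

// printPercentiles dumps the Values map of a percentiles aggregation;
// keys are the percentiles as strings, values are the estimates.
func printPercentiles(res *elastic.SearchResult) {
	if pct, found := res.Aggregations.Percentiles("load_time_pct"); found {
		for p, v := range pct.Values {
			fmt.Printf("p%s = %.2f\n", p, v)
		}
	}
}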
-type AggregationBucketRangeItems struct { - Aggregations - - DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` - SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` - Buckets []*AggregationBucketRangeItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. -func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCountErrorUpperBound) - } - if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.SumOfOtherDocCount) - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(*v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned -// with a keyed range aggregation. -type AggregationBucketKeyedRangeItems struct { - Aggregations - - DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` - SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` - Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedRangeItems structure. -func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCountErrorUpperBound) - } - if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.SumOfOtherDocCount) - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(*v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure. -type AggregationBucketRangeItem struct { - Aggregations - - Key string //`json:"key"` - DocCount int64 //`json:"doc_count"` - From *float64 //`json:"from"` - FromAsString string //`json:"from_as_string"` - To *float64 //`json:"to"` - ToAsString string //`json:"to_as_string"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure. -func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(*v, &a.Key) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCount) - } - if v, ok := aggs["from"]; ok && v != nil { - json.Unmarshal(*v, &a.From) - } - if v, ok := aggs["from_as_string"]; ok && v != nil { - json.Unmarshal(*v, &a.FromAsString) - } - if v, ok := aggs["to"]; ok && v != nil { - json.Unmarshal(*v, &a.To) - } - if v, ok := aggs["to_as_string"]; ok && v != nil { - json.Unmarshal(*v, &a.ToAsString) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket key items -- - -// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned -// with a terms aggregation.
-type AggregationBucketKeyItems struct { - Aggregations - - DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` - SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` - Buckets []*AggregationBucketKeyItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure. -func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCountErrorUpperBound) - } - if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.SumOfOtherDocCount) - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(*v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure. -type AggregationBucketKeyItem struct { - Aggregations - - Key interface{} //`json:"key"` - KeyNumber json.Number - DocCount int64 //`json:"doc_count"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure. -func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - dec := json.NewDecoder(bytes.NewReader(data)) - dec.UseNumber() - if err := dec.Decode(&aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(*v, &a.Key) - json.Unmarshal(*v, &a.KeyNumber) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCount) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket types for significant terms -- - -// AggregationBucketSignificantTerms is a bucket aggregation returned -// with a significant terms aggregation. -type AggregationBucketSignificantTerms struct { - Aggregations - - DocCount int64 //`json:"doc_count"` - Buckets []*AggregationBucketSignificantTerm //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure. -func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCount) - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(*v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure. -type AggregationBucketSignificantTerm struct { - Aggregations - - Key string //`json:"key"` - DocCount int64 //`json:"doc_count"` - BgCount int64 //`json:"bg_count"` - Score float64 //`json:"score"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure. 
-func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(*v, &a.Key) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCount) - } - if v, ok := aggs["bg_count"]; ok && v != nil { - json.Unmarshal(*v, &a.BgCount) - } - if v, ok := aggs["score"]; ok && v != nil { - json.Unmarshal(*v, &a.Score) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket filters -- - -// AggregationBucketFilters is a multi-bucket aggregation that is returned -// with a filters aggregation. -type AggregationBucketFilters struct { - Aggregations - - Buckets []*AggregationBucketKeyItem //`json:"buckets"` - NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure. -func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(*v, &a.Buckets) - json.Unmarshal(*v, &a.NamedBuckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket histogram items -- - -// AggregationBucketHistogramItems is a bucket aggregation that is returned -// with a date histogram aggregation. -type AggregationBucketHistogramItems struct { - Aggregations - - Buckets []*AggregationBucketHistogramItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure. -func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(*v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(*v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure. -type AggregationBucketHistogramItem struct { - Aggregations - - Key int64 //`json:"key"` - KeyAsString *string //`json:"key_as_string"` - DocCount int64 //`json:"doc_count"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure. -func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error { - var aggs map[string]*json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(*v, &a.Key) - } - if v, ok := aggs["key_as_string"]; ok && v != nil { - json.Unmarshal(*v, &a.KeyAsString) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(*v, &a.DocCount) - } - a.Aggregations = aggs - return nil -} - -// -- Pipeline simple value -- - -// AggregationPipelineSimpleValue is a simple value, returned e.g. by a -// MovAvg aggregation. 
-type AggregationPipelineSimpleValue struct {
-	Aggregations
-
-	Value         *float64               // `json:"value"`
-	ValueAsString string                 // `json:"value_as_string"`
-	Meta          map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure.
-func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error {
-	var aggs map[string]*json.RawMessage
-	if err := json.Unmarshal(data, &aggs); err != nil {
-		return err
-	}
-	if v, ok := aggs["value"]; ok && v != nil {
-		json.Unmarshal(*v, &a.Value)
-	}
-	if v, ok := aggs["value_as_string"]; ok && v != nil {
-		json.Unmarshal(*v, &a.ValueAsString)
-	}
-	if v, ok := aggs["meta"]; ok && v != nil {
-		json.Unmarshal(*v, &a.Meta)
-	}
-	a.Aggregations = aggs
-	return nil
-}
-
-// -- Pipeline bucket metric value --
-
-// AggregationPipelineBucketMetricValue is a value returned e.g. by a
-// MaxBucket aggregation.
-type AggregationPipelineBucketMetricValue struct {
-	Aggregations
-
-	Keys          []interface{}          // `json:"keys"`
-	Value         *float64               // `json:"value"`
-	ValueAsString string                 // `json:"value_as_string"`
-	Meta          map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure.
-func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error {
-	var aggs map[string]*json.RawMessage
-	if err := json.Unmarshal(data, &aggs); err != nil {
-		return err
-	}
-	if v, ok := aggs["keys"]; ok && v != nil {
-		json.Unmarshal(*v, &a.Keys)
-	}
-	if v, ok := aggs["value"]; ok && v != nil {
-		json.Unmarshal(*v, &a.Value)
-	}
-	if v, ok := aggs["value_as_string"]; ok && v != nil {
-		json.Unmarshal(*v, &a.ValueAsString)
-	}
-	if v, ok := aggs["meta"]; ok && v != nil {
-		json.Unmarshal(*v, &a.Meta)
-	}
-	a.Aggregations = aggs
-	return nil
-}
-
-// -- Pipeline derivative --
-
-// AggregationPipelineDerivative is the value returned by a
-// Derivative aggregation.
-type AggregationPipelineDerivative struct {
-	Aggregations
-
-	Value                   *float64               // `json:"value"`
-	ValueAsString           string                 // `json:"value_as_string"`
-	NormalizedValue         *float64               // `json:"normalized_value"`
-	NormalizedValueAsString string                 // `json:"normalized_value_as_string"`
-	Meta                    map[string]interface{} // `json:"meta,omitempty"`
-}
-
-// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure.
-func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error {
-	var aggs map[string]*json.RawMessage
-	if err := json.Unmarshal(data, &aggs); err != nil {
-		return err
-	}
-	if v, ok := aggs["value"]; ok && v != nil {
-		json.Unmarshal(*v, &a.Value)
-	}
-	if v, ok := aggs["value_as_string"]; ok && v != nil {
-		json.Unmarshal(*v, &a.ValueAsString)
-	}
-	if v, ok := aggs["normalized_value"]; ok && v != nil {
-		json.Unmarshal(*v, &a.NormalizedValue)
-	}
-	if v, ok := aggs["normalized_value_as_string"]; ok && v != nil {
-		json.Unmarshal(*v, &a.NormalizedValueAsString)
-	}
-	if v, ok := aggs["meta"]; ok && v != nil {
-		json.Unmarshal(*v, &a.Meta)
-	}
-	a.Aggregations = aggs
-	return nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children.go
deleted file mode 100644
index 903e5461f..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ChildrenAggregation is a special single bucket aggregation that enables -// aggregating from buckets on parent document types to buckets on child documents. -// It is available from 1.4.0.Beta1 upwards. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html -type ChildrenAggregation struct { - typ string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewChildrenAggregation() *ChildrenAggregation { - return &ChildrenAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation { - a.typ = typ - return a -} - -func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation { - a.meta = metaData - return a -} - -func (a *ChildrenAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "to-answers" : { - // "children": { - // "type" : "answer" - // } - // } - // } - // } - // This method returns only the { "type" : ... } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["children"] = opts - opts["type"] = a.typ - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children_test.go deleted file mode 100644 index a305073f3..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_children_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
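The response-side container types removed earlier in this patch (AggregationBucketKeyItems and friends) are the decoding counterparts of builders like the ChildrenAggregation above. As a minimal decoding sketch, assuming the elastic package types plus the encoding/json and fmt imports are in scope (the helper name and JSON fragment are illustrative, not from the patch):

func decodeTermsSketch() error {
	raw := []byte(`{"doc_count_error_upper_bound":0,"sum_other_doc_count":2,"buckets":[{"key":"golang","doc_count":7}]}`)
	var terms AggregationBucketKeyItems
	// json.Unmarshal dispatches to the custom UnmarshalJSON shown above.
	if err := json.Unmarshal(raw, &terms); err != nil {
		return err
	}
	for _, bucket := range terms.Buckets {
		// Key decodes into an interface{}; KeyNumber additionally keeps
		// numeric keys as a json.Number.
		fmt.Printf("bucket %v: %d docs\n", bucket.Key, bucket.DocCount)
	}
	return nil
}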
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestChildrenAggregation(t *testing.T) { - agg := NewChildrenAggregation().Type("answer") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"children":{"type":"answer"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestChildrenAggregationWithSubAggregation(t *testing.T) { - subAgg := NewTermsAggregation().Field("owner.display_name").Size(10) - agg := NewChildrenAggregation().Type("answer") - agg = agg.SubAggregation("top-names", subAgg) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"top-names":{"terms":{"field":"owner.display_name","size":10}}},"children":{"type":"answer"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go deleted file mode 100644 index 231c51ef8..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// DateHistogramAggregation is a multi-bucket aggregation similar to the -// histogram except it can only be applied on date values. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html -type DateHistogramAggregation struct { - field string - script *Script - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - - interval string - order string - orderAsc bool - minDocCount *int64 - extendedBoundsMin interface{} - extendedBoundsMax interface{} - timeZone string - format string - offset string -} - -// NewDateHistogramAggregation creates a new DateHistogramAggregation. -func NewDateHistogramAggregation() *DateHistogramAggregation { - return &DateHistogramAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -// Field on which the aggregation is processed. -func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation { - a.field = field - return a -} - -func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation { - a.missing = missing - return a -} - -func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation { - a.meta = metaData - return a -} - -// Interval by which the aggregation gets processed. 
-// Allowed values are: "year", "quarter", "month", "week", "day",
-// "hour", "minute". It also supports time settings like "1.5h"
-// (up to "w" for weeks).
-func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation {
-	a.interval = interval
-	return a
-}
-
-// Order specifies the sort order. Valid values for order are:
-// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
-// with a metric.
-func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation {
-	a.order = order
-	a.orderAsc = asc
-	return a
-}
-
-func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation {
-	// "order" : { "_count" : "asc" }
-	a.order = "_count"
-	a.orderAsc = asc
-	return a
-}
-
-func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation {
-	return a.OrderByCount(true)
-}
-
-func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation {
-	return a.OrderByCount(false)
-}
-
-func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation {
-	// "order" : { "_key" : "asc" }
-	a.order = "_key"
-	a.orderAsc = asc
-	return a
-}
-
-func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation {
-	return a.OrderByKey(true)
-}
-
-func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation {
-	return a.OrderByKey(false)
-}
-
-// OrderByAggregation creates a bucket ordering strategy which sorts buckets
-// based on a single-valued sub-aggregation.
-func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation {
-	// {
-	//     "aggs" : {
-	//         "genders" : {
-	//             "terms" : {
-	//                 "field" : "gender",
-	//                 "order" : { "avg_height" : "desc" }
-	//             },
-	//             "aggs" : {
-	//                 "avg_height" : { "avg" : { "field" : "height" } }
-	//             }
-	//         }
-	//     }
-	// }
-	a.order = aggName
-	a.orderAsc = asc
-	return a
-}
-
-// OrderByAggregationAndMetric creates a bucket ordering strategy which
-// sorts buckets based on a metric of a multi-valued sub-aggregation.
-func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation {
-	// {
-	//     "aggs" : {
-	//         "genders" : {
-	//             "terms" : {
-	//                 "field" : "gender",
-	//                 "order" : { "height_stats.avg" : "desc" }
-	//             },
-	//             "aggs" : {
-	//                 "height_stats" : { "stats" : { "field" : "height" } }
-	//             }
-	//         }
-	//     }
-	// }
-	a.order = aggName + "." + metric
-	a.orderAsc = asc
-	return a
-}
-
-// MinDocCount sets the minimum document count per bucket.
-// Buckets with fewer documents than this min value will not be returned.
-func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation {
-	a.minDocCount = &minDocCount
-	return a
-}
-
-// TimeZone sets the timezone in which to translate dates before computing buckets.
-func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation {
-	a.timeZone = timeZone
-	return a
-}
-
-// Format sets the format to use for dates.
-func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation {
-	a.format = format
-	return a
-}
-
-// Offset sets the offset of time intervals in the histogram, e.g. "+6h".
-func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation {
-	a.offset = offset
-	return a
-}
-
-// ExtendedBounds accepts int, int64, string, or time.Time values.
-// In case the lower value in the histogram would be greater than min or the -// upper value would be less than max, empty buckets will be generated. -func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation { - a.extendedBoundsMin = min - a.extendedBoundsMax = max - return a -} - -// ExtendedBoundsMin accepts int, int64, string, or time.Time values. -func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation { - a.extendedBoundsMin = min - return a -} - -// ExtendedBoundsMax accepts int, int64, string, or time.Time values. -func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation { - a.extendedBoundsMax = max - return a -} - -func (a *DateHistogramAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "articles_over_time" : { - // "date_histogram" : { - // "field" : "date", - // "interval" : "month" - // } - // } - // } - // } - // - // This method returns only the { "date_histogram" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["date_histogram"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - opts["interval"] = a.interval - if a.minDocCount != nil { - opts["min_doc_count"] = *a.minDocCount - } - if a.order != "" { - o := make(map[string]interface{}) - if a.orderAsc { - o[a.order] = "asc" - } else { - o[a.order] = "desc" - } - opts["order"] = o - } - if a.timeZone != "" { - opts["time_zone"] = a.timeZone - } - if a.offset != "" { - opts["offset"] = a.offset - } - if a.format != "" { - opts["format"] = a.format - } - if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { - bounds := make(map[string]interface{}) - if a.extendedBoundsMin != nil { - bounds["min"] = a.extendedBoundsMin - } - if a.extendedBoundsMax != nil { - bounds["max"] = a.extendedBoundsMax - } - opts["extended_bounds"] = bounds - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go deleted file mode 100644 index 3c826ce9e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestDateHistogramAggregation(t *testing.T) { - agg := NewDateHistogramAggregation(). - Field("date"). - Interval("month"). - Format("YYYY-MM"). - TimeZone("UTC"). 
-		Offset("+6h")
-	src, err := agg.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month","offset":"+6h","time_zone":"UTC"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestDateHistogramAggregationWithMissing(t *testing.T) {
-	agg := NewDateHistogramAggregation().Field("date").Interval("year").Missing("1900")
-	src, err := agg.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"date_histogram":{"field":"date","interval":"year","missing":"1900"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range.go
deleted file mode 100644
index 82de0696b..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"time"
-)
-
-// DateRangeAggregation is a range aggregation that is dedicated to
-// date values. The main difference between this aggregation and the
-// normal range aggregation is that the from and to values can be expressed
-// in Date Math expressions, and it is also possible to specify a
-// date format by which the from and to response fields will be returned.
-// Note that this aggregation includes the from value and excludes the to
-// value for each range.
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
-type DateRangeAggregation struct {
-	field           string
-	script          *Script
-	subAggregations map[string]Aggregation
-	meta            map[string]interface{}
-	keyed           *bool
-	unmapped        *bool
-	format          string
-	entries         []DateRangeAggregationEntry
-}
-
-type DateRangeAggregationEntry struct {
-	Key  string
-	From interface{}
-	To   interface{}
-}
-
-func NewDateRangeAggregation() *DateRangeAggregation {
-	return &DateRangeAggregation{
-		subAggregations: make(map[string]Aggregation),
-		entries:         make([]DateRangeAggregationEntry, 0),
-	}
-}
-
-func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation {
-	a.field = field
-	return a
-}
-
-func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation {
-	a.script = script
-	return a
-}
-
-func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation {
-	a.subAggregations[name] = subAggregation
-	return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation { - a.meta = metaData - return a -} - -func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation { - a.keyed = &keyed - return a -} - -func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation { - a.unmapped = &unmapped - return a -} - -func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation { - a.format = format - return a -} - -func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil}) - return a -} - -func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil}) - return a -} - -func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to}) - return a -} - -func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to}) - return a -} - -func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to}) - return a -} - -func (a *DateRangeAggregation) LtWithKey(key string, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to}) - return a -} - -func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil}) - return a -} - -func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil}) - return a -} - -func (a *DateRangeAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "range" : { - // "date_range": { - // "field": "date", - // "format": "MM-yyy", - // "ranges": [ - // { "to": "now-10M/M" }, - // { "from": "now-10M/M" } - // ] - // } - // } - // } - // } - // } - // - // This method returns only the { "date_range" : { ... } } part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["date_range"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - - if a.keyed != nil { - opts["keyed"] = *a.keyed - } - if a.unmapped != nil { - opts["unmapped"] = *a.unmapped - } - if a.format != "" { - opts["format"] = a.format - } - - ranges := make([]interface{}, 0) - for _, ent := range a.entries { - r := make(map[string]interface{}) - if ent.Key != "" { - r["key"] = ent.Key - } - if ent.From != nil { - switch from := ent.From.(type) { - case int, int16, int32, int64, float32, float64: - r["from"] = from - case time.Time: - r["from"] = from.Format(time.RFC3339) - case string: - r["from"] = from - } - } - if ent.To != nil { - switch to := ent.To.(type) { - case int, int16, int32, int64, float32, float64: - r["to"] = to - case time.Time: - r["to"] = to.Format(time.RFC3339) - case string: - r["to"] = to - } - } - ranges = append(ranges, r) - } - opts["ranges"] = ranges - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go deleted file mode 100644 index 42c525121..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestDateRangeAggregation(t *testing.T) { - agg := NewDateRangeAggregation().Field("created_at") - agg = agg.AddRange(nil, "2012-12-31") - agg = agg.AddRange("2013-01-01", "2013-12-31") - agg = agg.AddRange("2014-01-01", nil) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestDateRangeAggregationWithUnbounded(t *testing.T) { - agg := NewDateRangeAggregation().Field("created_at"). - AddUnboundedFrom("2012-12-31"). - AddRange("2013-01-01", "2013-12-31"). 
- AddUnboundedTo("2014-01-01") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestDateRangeAggregationWithLtAndCo(t *testing.T) { - agg := NewDateRangeAggregation().Field("created_at"). - Lt("2012-12-31"). - Between("2013-01-01", "2013-12-31"). - Gt("2014-01-01") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestDateRangeAggregationWithKeyedFlag(t *testing.T) { - agg := NewDateRangeAggregation().Field("created_at"). - Keyed(true). - Lt("2012-12-31"). - Between("2013-01-01", "2013-12-31"). - Gt("2014-01-01") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestDateRangeAggregationWithKeys(t *testing.T) { - agg := NewDateRangeAggregation().Field("created_at"). - Keyed(true). - LtWithKey("pre-2012", "2012-12-31"). - BetweenWithKey("2013", "2013-01-01", "2013-12-31"). - GtWithKey("post-2013", "2014-01-01") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"key":"pre-2012","to":"2012-12-31"},{"from":"2013-01-01","key":"2013","to":"2013-12-31"},{"from":"2014-01-01","key":"post-2013"}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestDateRangeAggregationWithSpecialNames(t *testing.T) { - agg := NewDateRangeAggregation().Field("created_at"). - AddRange("now-10M/M", "now+10M/M") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"date_range":{"field":"created_at","ranges":[{"from":"now-10M/M","to":"now+10M/M"}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter.go deleted file mode 100644 index 101399882..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
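The date-range tests above never exercise Format or the time.Time handling in Source. A sketch of those options, assuming the deleted builder were still in scope (the field name, key, and cutoff date are illustrative):

func dateRangeSketch() (interface{}, error) {
	cutoff := time.Date(2015, time.January, 1, 0, 0, 0, 0, time.UTC)
	agg := NewDateRangeAggregation().
		Field("created_at").
		Format("yyyy-MM-dd").
		GtWithKey("recent", cutoff)
	// Per the Source implementation above, time.Time bounds are emitted
	// in RFC3339, giving roughly:
	// {"date_range":{"field":"created_at","format":"yyyy-MM-dd",
	//   "ranges":[{"from":"2015-01-01T00:00:00Z","key":"recent"}]}}
	return agg.Source()
}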
- -package elastic - -// FilterAggregation defines a single bucket of all the documents -// in the current document set context that match a specified filter. -// Often this will be used to narrow down the current aggregation context -// to a specific set of documents. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html -type FilterAggregation struct { - filter Query - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewFilterAggregation() *FilterAggregation { - return &FilterAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation { - a.meta = metaData - return a -} - -func (a *FilterAggregation) Filter(filter Query) *FilterAggregation { - a.filter = filter - return a -} - -func (a *FilterAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "in_stock_products" : { - // "filter" : { "range" : { "stock" : { "gt" : 0 } } } - // } - // } - // } - // This method returns only the { "filter" : {} } part. - - src, err := a.filter.Source() - if err != nil { - return nil, err - } - source := make(map[string]interface{}) - source["filter"] = src - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter_test.go deleted file mode 100644 index 5c6262a26..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filter_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestFilterAggregation(t *testing.T) { - filter := NewRangeQuery("stock").Gt(0) - agg := NewFilterAggregation().Filter(filter) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFilterAggregationWithSubAggregation(t *testing.T) { - avgPriceAgg := NewAvgAggregation().Field("price") - filter := NewRangeQuery("stock").Gt(0) - agg := NewFilterAggregation().Filter(filter). 
-		SubAggregation("avg_price", avgPriceAgg)
-	src, err := agg.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestFilterAggregationWithMeta(t *testing.T) {
-	filter := NewRangeQuery("stock").Gt(0)
-	agg := NewFilterAggregation().Filter(filter).Meta(map[string]interface{}{"name": "Oliver"})
-	src, err := agg.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"meta":{"name":"Oliver"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters.go
deleted file mode 100644
index 6dda39c61..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// FiltersAggregation defines a multi-bucket aggregation where each bucket
-// is associated with a filter. Each bucket will collect all documents that
-// match its associated filter.
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
-type FiltersAggregation struct {
-	filters         []Query
-	subAggregations map[string]Aggregation
-	meta            map[string]interface{}
-}
-
-func NewFiltersAggregation() *FiltersAggregation {
-	return &FiltersAggregation{
-		filters:         make([]Query, 0),
-		subAggregations: make(map[string]Aggregation),
-	}
-}
-
-func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation {
-	a.filters = append(a.filters, filter)
-	return a
-}
-
-func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation {
-	if len(filters) > 0 {
-		a.filters = append(a.filters, filters...)
-	}
-	return a
-}
-
-func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation {
-	a.subAggregations[name] = subAggregation
-	return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation {
-	a.meta = metaData
-	return a
-}
-
-func (a *FiltersAggregation) Source() (interface{}, error) {
-	// Example:
-	// {
-	//   "aggs" : {
-	//     "messages" : {
-	//       "filters" : {
-	//         "filters" : {
-	//           "errors" :   { "term" : { "body" : "error"   }},
-	//           "warnings" : { "term" : { "body" : "warning" }}
-	//         }
-	//       }
-	//     }
-	//   }
-	// }
-	// This method returns only the (outer) { "filters" : {} } part.
- - source := make(map[string]interface{}) - filters := make(map[string]interface{}) - source["filters"] = filters - - arr := make([]interface{}, len(a.filters)) - for i, filter := range a.filters { - src, err := filter.Source() - if err != nil { - return nil, err - } - arr[i] = src - } - filters["filters"] = arr - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters_test.go deleted file mode 100644 index 4977d5162..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_filters_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestFiltersAggregation(t *testing.T) { - f1 := NewRangeQuery("stock").Gt(0) - f2 := NewTermQuery("symbol", "GOOG") - agg := NewFiltersAggregation().Filters(f1, f2) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFiltersAggregationWithSubAggregation(t *testing.T) { - avgPriceAgg := NewAvgAggregation().Field("price") - f1 := NewRangeQuery("stock").Gt(0) - f2 := NewTermQuery("symbol", "GOOG") - agg := NewFiltersAggregation().Filters(f1, f2).SubAggregation("avg_price", avgPriceAgg) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFiltersAggregationWithMetaData(t *testing.T) { - f1 := NewRangeQuery("stock").Gt(0) - f2 := NewTermQuery("symbol", "GOOG") - agg := NewFiltersAggregation().Filters(f1, f2).Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]},"meta":{"name":"Oliver"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go 
b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go
deleted file mode 100644
index 3a1372221..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
-// and conceptually works very similarly to the range aggregation.
-// The user can define a point of origin and a set of distance range buckets.
-// The aggregation evaluates the distance of each document value from
-// the origin point and determines the buckets it belongs to based on
-// the ranges (a document belongs to a bucket if the distance between the
-// document and the origin falls within the distance range of the bucket).
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html
-type GeoDistanceAggregation struct {
-	field           string
-	unit            string
-	distanceType    string
-	point           string
-	ranges          []geoDistAggRange
-	subAggregations map[string]Aggregation
-	meta            map[string]interface{}
-}
-
-type geoDistAggRange struct {
-	Key  string
-	From interface{}
-	To   interface{}
-}
-
-func NewGeoDistanceAggregation() *GeoDistanceAggregation {
-	return &GeoDistanceAggregation{
-		subAggregations: make(map[string]Aggregation),
-		ranges:          make([]geoDistAggRange, 0),
-	}
-}
-
-func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation {
-	a.field = field
-	return a
-}
-
-func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation {
-	a.unit = unit
-	return a
-}
-
-func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation {
-	a.distanceType = distanceType
-	return a
-}
-
-func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation {
-	a.point = latLon
-	return a
-}
-
-func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation {
-	a.subAggregations[name] = subAggregation
-	return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation {
-	a.meta = metaData
-	return a
-}
-func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation {
-	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
-	return a
-}
-
-func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
-	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
-	return a
-}
-
-func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation {
-	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
-	return a
-}
-
-func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation {
-	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
-	return a
-}
-
-func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation {
-	a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
-	return a
-}
-
-func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation {
-	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
-	return a
-}
-
-func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation {
-	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
-	return a
-}
-
-func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
-	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
-	return a
-}
-
-func (a *GeoDistanceAggregation) Source() (interface{}, error) {
-	// Example:
-	// {
-	//    "aggs" : {
-	//        "rings_around_amsterdam" : {
-	//            "geo_distance" : {
-	//                "field" : "location",
-	//                "origin" : "52.3760, 4.894",
-	//                "ranges" : [
-	//                    { "to" : 100 },
-	//                    { "from" : 100, "to" : 300 },
-	//                    { "from" : 300 }
-	//                ]
-	//            }
-	//        }
-	//    }
-	// }
-	//
-	// This method returns only the { "geo_distance" : { ... } } part.
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["geo_distance"] = opts - - if a.field != "" { - opts["field"] = a.field - } - if a.unit != "" { - opts["unit"] = a.unit - } - if a.distanceType != "" { - opts["distance_type"] = a.distanceType - } - if a.point != "" { - opts["origin"] = a.point - } - - ranges := make([]interface{}, 0) - for _, ent := range a.ranges { - r := make(map[string]interface{}) - if ent.Key != "" { - r["key"] = ent.Key - } - if ent.From != nil { - switch from := ent.From.(type) { - case int, int16, int32, int64, float32, float64: - r["from"] = from - case *int, *int16, *int32, *int64, *float32, *float64: - r["from"] = from - case string: - r["from"] = from - } - } - if ent.To != nil { - switch to := ent.To.(type) { - case int, int16, int32, int64, float32, float64: - r["to"] = to - case *int, *int16, *int32, *int64, *float32, *float64: - r["to"] = to - case string: - r["to"] = to - } - } - ranges = append(ranges, r) - } - opts["ranges"] = ranges - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go deleted file mode 100644 index 4cb0cd9f8..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
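The geo-distance tests below cover ranges and meta data but not the Unit and DistanceType options. A sketch of those, assuming the deleted builder were still available (the field, origin, and values are illustrative):

func geoDistanceSketch() (interface{}, error) {
	agg := NewGeoDistanceAggregation().
		Field("location").
		Point("52.3760, 4.894").
		Unit("km").
		DistanceType("plane").
		AddUnboundedFrom(100) // one bucket: everything within 100 (km)
	// Per Source above, this adds "unit":"km" and "distance_type":"plane"
	// next to "origin" and "ranges" inside the emitted "geo_distance" object.
	return agg.Source()
}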
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestGeoDistanceAggregation(t *testing.T) { - agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") - agg = agg.AddRange(nil, 100) - agg = agg.AddRange(100, 300) - agg = agg.AddRange(300, nil) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoDistanceAggregationWithUnbounded(t *testing.T) { - agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") - agg = agg.AddUnboundedFrom(100) - agg = agg.AddRange(100, 300) - agg = agg.AddUnboundedTo(300) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoDistanceAggregationWithMetaData(t *testing.T) { - agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894") - agg = agg.AddRange(nil, 100) - agg = agg.AddRange(100, 300) - agg = agg.AddRange(300, nil) - agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]},"meta":{"name":"Oliver"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global.go deleted file mode 100644 index 49e24d60f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// GlobalAggregation defines a single bucket of all the documents within -// the search execution context. This context is defined by the indices -// and the document types you’re searching on, but is not influenced -// by the search query itself. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html -type GlobalAggregation struct { - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewGlobalAggregation() *GlobalAggregation { - return &GlobalAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
-func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation { - a.meta = metaData - return a -} - -func (a *GlobalAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "all_products" : { - // "global" : {}, - // "aggs" : { - // "avg_price" : { "avg" : { "field" : "price" } } - // } - // } - // } - // } - // This method returns only the { "global" : {} } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["global"] = opts - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global_test.go deleted file mode 100644 index 8b55010c7..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_global_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestGlobalAggregation(t *testing.T) { - agg := NewGlobalAggregation() - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"global":{}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGlobalAggregationWithMetaData(t *testing.T) { - agg := NewGlobalAggregation().Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"global":{},"meta":{"name":"Oliver"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram.go deleted file mode 100644 index 7821adbc0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// HistogramAggregation is a multi-bucket values source based aggregation -// that can be applied on numeric values extracted from the documents. -// It dynamically builds fixed size (a.k.a. interval) buckets over the -// values. 
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
-type HistogramAggregation struct {
-	field           string
-	script          *Script
-	missing         interface{}
-	subAggregations map[string]Aggregation
-	meta            map[string]interface{}
-
-	interval          int64
-	order             string
-	orderAsc          bool
-	minDocCount       *int64
-	extendedBoundsMin *int64
-	extendedBoundsMax *int64
-	offset            *int64
-}
-
-func NewHistogramAggregation() *HistogramAggregation {
-	return &HistogramAggregation{
-		subAggregations: make(map[string]Aggregation),
-	}
-}
-
-func (a *HistogramAggregation) Field(field string) *HistogramAggregation {
-	a.field = field
-	return a
-}
-
-func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation {
-	a.script = script
-	return a
-}
-
-// Missing configures the value to use when documents miss a value.
-func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation {
-	a.missing = missing
-	return a
-}
-
-func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation {
-	a.subAggregations[name] = subAggregation
-	return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation {
-	a.meta = metaData
-	return a
-}
-
-func (a *HistogramAggregation) Interval(interval int64) *HistogramAggregation {
-	a.interval = interval
-	return a
-}
-
-// Order specifies the sort order. Valid values for order are:
-// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
-// with a metric.
-func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation {
-	a.order = order
-	a.orderAsc = asc
-	return a
-}
-
-func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation {
-	// "order" : { "_count" : "asc" }
-	a.order = "_count"
-	a.orderAsc = asc
-	return a
-}
-
-func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation {
-	return a.OrderByCount(true)
-}
-
-func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation {
-	return a.OrderByCount(false)
-}
-
-func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation {
-	// "order" : { "_key" : "asc" }
-	a.order = "_key"
-	a.orderAsc = asc
-	return a
-}
-
-func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation {
-	return a.OrderByKey(true)
-}
-
-func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation {
-	return a.OrderByKey(false)
-}
-
-// OrderByAggregation creates a bucket ordering strategy which sorts buckets
-// based on a single-valued sub-aggregation.
-func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation {
-	// {
-	//     "aggs" : {
-	//         "genders" : {
-	//             "terms" : {
-	//                 "field" : "gender",
-	//                 "order" : { "avg_height" : "desc" }
-	//             },
-	//             "aggs" : {
-	//                 "avg_height" : { "avg" : { "field" : "height" } }
-	//             }
-	//         }
-	//     }
-	// }
-	a.order = aggName
-	a.orderAsc = asc
-	return a
-}
-
-// OrderByAggregationAndMetric creates a bucket ordering strategy which
-// sorts buckets based on a metric of a multi-valued sub-aggregation.
-func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "height_stats.avg" : "desc" } - // }, - // "aggs" : { - // "height_stats" : { "stats" : { "field" : "height" } } - // } - // } - // } - // } - a.order = aggName + "." + metric - a.orderAsc = asc - return a -} - -func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation { - a.minDocCount = &minDocCount - return a -} - -func (a *HistogramAggregation) ExtendedBounds(min, max int64) *HistogramAggregation { - a.extendedBoundsMin = &min - a.extendedBoundsMax = &max - return a -} - -func (a *HistogramAggregation) ExtendedBoundsMin(min int64) *HistogramAggregation { - a.extendedBoundsMin = &min - return a -} - -func (a *HistogramAggregation) ExtendedBoundsMax(max int64) *HistogramAggregation { - a.extendedBoundsMax = &max - return a -} - -func (a *HistogramAggregation) Offset(offset int64) *HistogramAggregation { - a.offset = &offset - return a -} - -func (a *HistogramAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "prices" : { - // "histogram" : { - // "field" : "price", - // "interval" : 50 - // } - // } - // } - // } - // - // This method returns only the { "histogram" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["histogram"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - opts["interval"] = a.interval - if a.order != "" { - o := make(map[string]interface{}) - if a.orderAsc { - o[a.order] = "asc" - } else { - o[a.order] = "desc" - } - opts["order"] = o - } - if a.offset != nil { - opts["offset"] = *a.offset - } - if a.minDocCount != nil { - opts["min_doc_count"] = *a.minDocCount - } - if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { - bounds := make(map[string]interface{}) - if a.extendedBoundsMin != nil { - bounds["min"] = a.extendedBoundsMin - } - if a.extendedBoundsMax != nil { - bounds["max"] = a.extendedBoundsMax - } - opts["extended_bounds"] = bounds - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go deleted file mode 100644 index 6a5d5fb92..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
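For context on the builder being removed here: the deleted HistogramAggregation serializes through Source() exactly as its tests assert. A minimal standalone sketch, assuming the vendored package as it existed before this removal (the upstream import path github.com/olivere/elastic is used for illustration; the vendor tree nested it under services/templeton/vendor/src):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic" // import path assumed; this vendored copy is removed by this patch
)

func main() {
	// Fixed-interval price buckets, ordered by key, with an average per bucket.
	agg := elastic.NewHistogramAggregation().
		Field("price").
		Interval(50).
		OrderByKeyAsc().
		SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"aggregations":{"avg_price":{"avg":{"field":"price"}}},"histogram":{"field":"price","interval":50,"order":{"_key":"asc"}}}
}
```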
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestHistogramAggregation(t *testing.T) { - agg := NewHistogramAggregation().Field("price").Interval(50) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"histogram":{"field":"price","interval":50}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHistogramAggregationWithMetaData(t *testing.T) { - agg := NewHistogramAggregation().Field("price").Offset(10).Interval(50).Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"histogram":{"field":"price","interval":50,"offset":10},"meta":{"name":"Oliver"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHistogramAggregationWithMissing(t *testing.T) { - agg := NewHistogramAggregation().Field("price").Interval(50).Missing("n/a") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"histogram":{"field":"price","interval":50,"missing":"n/a"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing.go deleted file mode 100644 index ca610c953..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MissingAggregation is a field data based single bucket aggregation, -// that creates a bucket of all documents in the current document set context -// that are missing a field value (effectively, missing a field or having -// the configured NULL value set). This aggregator will often be used in -// conjunction with other field data bucket aggregators (such as ranges) -// to return information for all the documents that could not be placed -// in any of the other buckets due to missing field data values. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html -type MissingAggregation struct { - field string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewMissingAggregation() *MissingAggregation { - return &MissingAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *MissingAggregation) Field(field string) *MissingAggregation { - a.field = field - return a -} - -func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
-func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation { - a.meta = metaData - return a -} - -func (a *MissingAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "products_without_a_price" : { - // "missing" : { "field" : "price" } - // } - // } - // } - // This method returns only the { "missing" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["missing"] = opts - - if a.field != "" { - opts["field"] = a.field - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing_test.go deleted file mode 100644 index b52a96511..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_missing_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMissingAggregation(t *testing.T) { - agg := NewMissingAggregation().Field("price") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"missing":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMissingAggregationWithMetaData(t *testing.T) { - agg := NewMissingAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"missing":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested.go deleted file mode 100644 index f65da8048..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// NestedAggregation is a special single bucket aggregation that enables -// aggregating nested documents. 
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-nested-aggregation.html -type NestedAggregation struct { - path string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewNestedAggregation() *NestedAggregation { - return &NestedAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation { - a.meta = metaData - return a -} - -func (a *NestedAggregation) Path(path string) *NestedAggregation { - a.path = path - return a -} - -func (a *NestedAggregation) Source() (interface{}, error) { - // Example: - // { - // "query" : { - // "match" : { "name" : "led tv" } - // } - // "aggs" : { - // "resellers" : { - // "nested" : { - // "path" : "resellers" - // }, - // "aggs" : { - // "min_price" : { "min" : { "field" : "resellers.price" } } - // } - // } - // } - // } - // This method returns only the { "nested" : {} } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["nested"] = opts - - opts["path"] = a.path - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested_test.go deleted file mode 100644 index c55612f07..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_nested_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
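The nested builder deleted above pairs a document path with metric sub-aggregations over the nested fields. A minimal sketch under the same assumptions as the earlier histogram example (pre-removal vendored package, illustrative import path):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic" // import path assumed; vendored copy removed by this patch
)

func main() {
	// Aggregate over nested "resellers" documents; NewMinAggregation is part of
	// the same deleted library (search_aggs_metrics_min.go upstream).
	agg := elastic.NewNestedAggregation().
		Path("resellers").
		SubAggregation("min_price", elastic.NewMinAggregation().Field("resellers.price"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"aggregations":{"min_price":{"min":{"field":"resellers.price"}}},"nested":{"path":"resellers"}}
}
```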
-
-package elastic
-
-import (
-	"encoding/json"
-	"testing"
-)
-
-func TestNestedAggregation(t *testing.T) {
-	agg := NewNestedAggregation().Path("resellers")
-	src, err := agg.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"nested":{"path":"resellers"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestNestedAggregationWithSubAggregation(t *testing.T) {
-	minPriceAgg := NewMinAggregation().Field("resellers.price")
-	agg := NewNestedAggregation().Path("resellers").SubAggregation("min_price", minPriceAgg)
-	src, err := agg.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"aggregations":{"min_price":{"min":{"field":"resellers.price"}}},"nested":{"path":"resellers"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestNestedAggregationWithMetaData(t *testing.T) {
-	agg := NewNestedAggregation().Path("resellers").Meta(map[string]interface{}{"name": "Oliver"})
-	src, err := agg.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"meta":{"name":"Oliver"},"nested":{"path":"resellers"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range.go
deleted file mode 100644
index bc017c60f..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range.go
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"time"
-)
-
-// RangeAggregation is a multi-bucket value source based aggregation that
-// enables the user to define a set of ranges - each representing a bucket.
-// During the aggregation process, the values extracted from each document
-// will be checked against each bucket range and "bucket" the
-// relevant/matching document. Note that this aggregation includes the
-// from value and excludes the to value for each range.
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
-type RangeAggregation struct {
-	field           string
-	script          *Script
-	missing         interface{}
-	subAggregations map[string]Aggregation
-	meta            map[string]interface{}
-	keyed           *bool
-	unmapped        *bool
-	entries         []rangeAggregationEntry
-}
-
-type rangeAggregationEntry struct {
-	Key  string
-	From interface{}
-	To   interface{}
-}
-
-func NewRangeAggregation() *RangeAggregation {
-	return &RangeAggregation{
-		subAggregations: make(map[string]Aggregation),
-		entries:         make([]rangeAggregationEntry, 0),
-	}
-}
-
-func (a *RangeAggregation) Field(field string) *RangeAggregation {
-	a.field = field
-	return a
-}
-
-func (a *RangeAggregation) Script(script *Script) *RangeAggregation {
-	a.script = script
-	return a
-}
-
-// Missing configures the value to use when documents miss a value.
-func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation { - a.missing = missing - return a -} - -func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation { - a.meta = metaData - return a -} - -func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation { - a.keyed = &keyed - return a -} - -func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation { - a.unmapped = &unmapped - return a -} - -func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) - return a -} - -func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) - return a -} - -func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) - return a -} - -func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) - return a -} - -func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) - return a -} - -func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) - return a -} - -func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *RangeAggregation) BetweenWithKey(key string, from, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) - return a -} - -func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) - return a -} - -func (a *RangeAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "price_ranges" : { - // "range" : { - // "field" : "price", - // "ranges" : [ - // { "to" : 50 }, - // { "from" : 50, "to" : 100 }, - // { "from" : 100 } - // ] - // } - // } - // } - // } - // - // This method returns only the { "range" : { ... } } part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["range"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - if a.keyed != nil { - opts["keyed"] = *a.keyed - } - if a.unmapped != nil { - opts["unmapped"] = *a.unmapped - } - - ranges := make([]interface{}, 0) - for _, ent := range a.entries { - r := make(map[string]interface{}) - if ent.Key != "" { - r["key"] = ent.Key - } - if ent.From != nil { - switch from := ent.From.(type) { - case int, int16, int32, int64, float32, float64: - r["from"] = from - case time.Time: - r["from"] = from.Format(time.RFC3339) - case string: - r["from"] = from - } - } - if ent.To != nil { - switch to := ent.To.(type) { - case int, int16, int32, int64, float32, float64: - r["to"] = to - case time.Time: - r["to"] = to.Format(time.RFC3339) - case string: - r["to"] = to - } - } - ranges = append(ranges, r) - } - opts["ranges"] = ranges - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range_test.go deleted file mode 100644 index f0fd5f5fd..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_range_test.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestRangeAggregation(t *testing.T) { - agg := NewRangeAggregation().Field("price") - agg = agg.AddRange(nil, 50) - agg = agg.AddRange(50, 100) - agg = agg.AddRange(100, nil) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRangeAggregationWithUnbounded(t *testing.T) { - agg := NewRangeAggregation().Field("field_name"). - AddUnboundedFrom(50). - AddRange(20, 70). - AddRange(70, 120). - AddUnboundedTo(150) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRangeAggregationWithLtAndCo(t *testing.T) { - agg := NewRangeAggregation().Field("field_name"). - Lt(50). - Between(20, 70). - Between(70, 120). 
- Gt(150) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRangeAggregationWithKeyedFlag(t *testing.T) { - agg := NewRangeAggregation().Field("field_name"). - Keyed(true). - Lt(50). - Between(20, 70). - Between(70, 120). - Gt(150) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRangeAggregationWithKeys(t *testing.T) { - agg := NewRangeAggregation().Field("field_name"). - Keyed(true). - LtWithKey("cheap", 50). - BetweenWithKey("affordable", 20, 70). - BetweenWithKey("average", 70, 120). - GtWithKey("expensive", 150) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"key":"cheap","to":50},{"from":20,"key":"affordable","to":70},{"from":70,"key":"average","to":120},{"from":150,"key":"expensive"}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRangeAggregationWithMetaData(t *testing.T) { - agg := NewRangeAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) - agg = agg.AddRange(nil, 50) - agg = agg.AddRange(50, 100) - agg = agg.AddRange(100, nil) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRangeAggregationWithMissing(t *testing.T) { - agg := NewRangeAggregation().Field("price").Missing(0) - agg = agg.AddRange(nil, 50) - agg = agg.AddRange(50, 100) - agg = agg.AddRange(100, nil) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"field":"price","missing":0,"ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler.go deleted file mode 100644 index 9a6df15ec..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
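The keyed range helpers removed above (LtWithKey, BetweenWithKey, GtWithKey) label each band in the response instead of leaving anonymous buckets. A minimal sketch, again assuming the pre-removal vendored package and an illustrative import path:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic" // import path assumed; vendored copy removed by this patch
)

func main() {
	// Keyed price bands; "from" is inclusive and "to" exclusive, per the
	// RangeAggregation doc comment above.
	agg := elastic.NewRangeAggregation().
		Field("price").
		Keyed(true).
		LtWithKey("cheap", 50).
		BetweenWithKey("mid", 50, 100).
		GtWithKey("expensive", 100)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"range":{"field":"price","keyed":true,"ranges":[{"key":"cheap","to":50},{"from":50,"key":"mid","to":100},{"from":100,"key":"expensive"}]}}
}
```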
- -package elastic - -// SamplerAggregation is a filtering aggregation used to limit any -// sub aggregations' processing to a sample of the top-scoring documents. -// Optionally, diversity settings can be used to limit the number of matches -// that share a common value such as an "author". -// See: https://www.elastic.co/guide/en/elasticsearch/reference/2.x/search-aggregations-bucket-sampler-aggregation.html -type SamplerAggregation struct { - field string - script *Script - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - - shardSize int - maxDocsPerValue int - executionHint string -} - -func NewSamplerAggregation() *SamplerAggregation { - return &SamplerAggregation{ - shardSize: -1, - maxDocsPerValue: -1, - subAggregations: make(map[string]Aggregation), - } -} - -func (a *SamplerAggregation) Field(field string) *SamplerAggregation { - a.field = field - return a -} - -func (a *SamplerAggregation) Script(script *Script) *SamplerAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *SamplerAggregation) Missing(missing interface{}) *SamplerAggregation { - a.missing = missing - return a -} - -func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation { - a.meta = metaData - return a -} - -// ShardSize sets the maximum number of docs returned from each shard. -func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation { - a.shardSize = shardSize - return a -} - -func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation { - a.maxDocsPerValue = maxDocsPerValue - return a -} - -func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation { - a.executionHint = hint - return a -} - -func (a *SamplerAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "sample" : { - // "sampler" : { - // "field" : "user.id", - // "shard_size" : 200 - // }, - // "aggs": { - // "keywords": { - // "significant_terms": { - // "field": "text" - // } - // } - // } - // } - // } - // } - // - // This method returns only the { "sampler" : { ... } } part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["sampler"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - if a.shardSize >= 0 { - opts["shard_size"] = a.shardSize - } - if a.maxDocsPerValue >= 0 { - opts["max_docs_per_value"] = a.maxDocsPerValue - } - if a.executionHint != "" { - opts["execution_hint"] = a.executionHint - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go deleted file mode 100644 index da4ca5534..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2012-2016 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestSamplerAggregation(t *testing.T) { - keywordsAgg := NewSignificantTermsAggregation().Field("text") - agg := NewSamplerAggregation(). - Field("user.id"). - ShardSize(200). - SubAggregation("keywords", keywordsAgg) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","shard_size":200}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSamplerAggregationWithMissing(t *testing.T) { - keywordsAgg := NewSignificantTermsAggregation().Field("text") - agg := NewSamplerAggregation(). - Field("user.id"). - Missing("n/a"). - SubAggregation("keywords", keywordsAgg) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","missing":"n/a"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go deleted file mode 100644 index 1008887f0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
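The sampler builder deleted above caps how many top-scoring documents feed its sub-aggregations; MaxDocsPerValue additionally de-duplicates by field value. A minimal sketch under the same pre-removal assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic" // import path assumed; vendored copy removed by this patch
)

func main() {
	// Limit the significant_terms sub-aggregation to the top 200 docs per
	// shard, with at most 3 docs sharing the same user.id value.
	agg := elastic.NewSamplerAggregation().
		Field("user.id").
		ShardSize(200).
		MaxDocsPerValue(3).
		SubAggregation("keywords", elastic.NewSignificantTermsAggregation().Field("text"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"field":"user.id","max_docs_per_value":3,"shard_size":200}}
}
```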
-
-package elastic
-
-// SignificantTermsAggregation is an aggregation that returns interesting
-// or unusual occurrences of terms in a set.
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
-type SignificantTermsAggregation struct {
-	field           string
-	subAggregations map[string]Aggregation
-	meta            map[string]interface{}
-
-	minDocCount      *int
-	shardMinDocCount *int
-	requiredSize     *int
-	shardSize        *int
-	filter           Query
-	executionHint    string
-}
-
-func NewSignificantTermsAggregation() *SignificantTermsAggregation {
-	return &SignificantTermsAggregation{
-		subAggregations: make(map[string]Aggregation, 0),
-	}
-}
-
-func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation {
-	a.field = field
-	return a
-}
-
-func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation {
-	a.subAggregations[name] = subAggregation
-	return a
-}
-
-// Meta sets the meta data to be included in the aggregation response.
-func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation {
-	a.meta = metaData
-	return a
-}
-
-func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation {
-	a.minDocCount = &minDocCount
-	return a
-}
-
-func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation {
-	a.shardMinDocCount = &shardMinDocCount
-	return a
-}
-
-func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation {
-	a.requiredSize = &requiredSize
-	return a
-}
-
-func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation {
-	a.shardSize = &shardSize
-	return a
-}
-
-func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation {
-	a.filter = filter
-	return a
-}
-
-func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation {
-	a.executionHint = hint
-	return a
-}
-
-func (a *SignificantTermsAggregation) Source() (interface{}, error) {
-	// Example:
-	// {
-	//     "query" : {
-	//         "terms" : {"force" : [ "British Transport Police" ]}
-	//     },
-	//     "aggregations" : {
-	//         "significantCrimeTypes" : {
-	//             "significant_terms" : { "field" : "crime_type" }
-	//         }
-	//     }
-	// }
-	//
-	// This method returns only the
-	// { "significant_terms" : { "field" : "crime_type" } }
-	// part.
-
-	source := make(map[string]interface{})
-	opts := make(map[string]interface{})
-	source["significant_terms"] = opts
-
-	if a.field != "" {
-		opts["size"] = *a.requiredSize // not a typo!
- } - if a.shardSize != nil { - opts["shard_size"] = *a.shardSize - } - if a.minDocCount != nil { - opts["min_doc_count"] = *a.minDocCount - } - if a.shardMinDocCount != nil { - opts["shard_min_doc_count"] = *a.shardMinDocCount - } - if a.filter != nil { - src, err := a.filter.Source() - if err != nil { - return nil, err - } - opts["background_filter"] = src - } - if a.executionHint != "" { - opts["execution_hint"] = a.executionHint - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go deleted file mode 100644 index d24f3c9d1..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestSignificantTermsAggregation(t *testing.T) { - agg := NewSignificantTermsAggregation().Field("crime_type") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"significant_terms":{"field":"crime_type"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSignificantTermsAggregationWithArgs(t *testing.T) { - agg := NewSignificantTermsAggregation(). - Field("crime_type"). - ExecutionHint("map"). - ShardSize(5). - MinDocCount(10). 
- BackgroundFilter(NewTermQuery("city", "London")) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"significant_terms":{"background_filter":{"term":{"city":"London"}},"execution_hint":"map","field":"crime_type","min_doc_count":10,"shard_size":5}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSignificantTermsAggregationSubAggregation(t *testing.T) { - crimeTypesAgg := NewSignificantTermsAggregation().Field("crime_type") - agg := NewTermsAggregation().Field("force") - agg = agg.SubAggregation("significantCrimeTypes", crimeTypesAgg) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"significantCrimeTypes":{"significant_terms":{"field":"crime_type"}}},"terms":{"field":"force"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSignificantTermsAggregationWithMetaData(t *testing.T) { - agg := NewSignificantTermsAggregation().Field("crime_type") - agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"significant_terms":{"field":"crime_type"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms.go deleted file mode 100644 index 2d3c0d1ad..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TermsAggregation is a multi-bucket value source based aggregation -// where buckets are dynamically built - one per unique value. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html -type TermsAggregation struct { - field string - script *Script - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - - size *int - shardSize *int - requiredSize *int - minDocCount *int - shardMinDocCount *int - valueType string - order string - orderAsc bool - includePattern string - includeFlags *int - excludePattern string - excludeFlags *int - executionHint string - collectionMode string - showTermDocCountError *bool - includeTerms []string - excludeTerms []string -} - -func NewTermsAggregation() *TermsAggregation { - return &TermsAggregation{ - subAggregations: make(map[string]Aggregation, 0), - includeTerms: make([]string, 0), - excludeTerms: make([]string, 0), - } -} - -func (a *TermsAggregation) Field(field string) *TermsAggregation { - a.field = field - return a -} - -func (a *TermsAggregation) Script(script *Script) *TermsAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. 
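One quirk of the significant-terms builder deleted above is worth an example: RequiredSize is serialized under the JSON key "size" (hence the "not a typo!" note in its Source method). A minimal sketch under the same pre-removal assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic" // import path assumed; vendored copy removed by this patch
)

func main() {
	// RequiredSize(3) emits "size":3; the background filter narrows the
	// reference set used to decide which terms are "significant".
	agg := elastic.NewSignificantTermsAggregation().
		Field("crime_type").
		RequiredSize(3).
		BackgroundFilter(elastic.NewTermQuery("force", "British Transport Police"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"significant_terms":{"background_filter":{"term":{"force":"British Transport Police"}},"field":"crime_type","size":3}}
}
```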
-func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation { - a.missing = missing - return a -} - -func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation { - a.meta = metaData - return a -} - -func (a *TermsAggregation) Size(size int) *TermsAggregation { - a.size = &size - return a -} - -func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation { - a.requiredSize = &requiredSize - return a -} - -func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation { - a.shardSize = &shardSize - return a -} - -func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation { - a.minDocCount = &minDocCount - return a -} - -func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation { - a.shardMinDocCount = &shardMinDocCount - return a -} - -func (a *TermsAggregation) Include(regexp string) *TermsAggregation { - a.includePattern = regexp - return a -} - -func (a *TermsAggregation) IncludeWithFlags(regexp string, flags int) *TermsAggregation { - a.includePattern = regexp - a.includeFlags = &flags - return a -} - -func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation { - a.excludePattern = regexp - return a -} - -func (a *TermsAggregation) ExcludeWithFlags(regexp string, flags int) *TermsAggregation { - a.excludePattern = regexp - a.excludeFlags = &flags - return a -} - -// ValueType can be string, long, or double. -func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation { - a.valueType = valueType - return a -} - -func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation { - a.order = order - a.orderAsc = asc - return a -} - -func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation { - // "order" : { "_count" : "asc" } - a.order = "_count" - a.orderAsc = asc - return a -} - -func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation { - return a.OrderByCount(true) -} - -func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation { - return a.OrderByCount(false) -} - -func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation { - // "order" : { "_term" : "asc" } - a.order = "_term" - a.orderAsc = asc - return a -} - -func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation { - return a.OrderByTerm(true) -} - -func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation { - return a.OrderByTerm(false) -} - -// OrderByAggregation creates a bucket ordering strategy which sorts buckets -// based on a single-valued calc get. -func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "avg_height" : "desc" } - // }, - // "aggs" : { - // "avg_height" : { "avg" : { "field" : "height" } } - // } - // } - // } - // } - a.order = aggName - a.orderAsc = asc - return a -} - -// OrderByAggregationAndMetric creates a bucket ordering strategy which -// sorts buckets based on a multi-valued calc get. 
-func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "height_stats.avg" : "desc" } - // }, - // "aggs" : { - // "height_stats" : { "stats" : { "field" : "height" } } - // } - // } - // } - // } - a.order = aggName + "." + metric - a.orderAsc = asc - return a -} - -func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation { - a.executionHint = hint - return a -} - -// Collection mode can be depth_first or breadth_first as of 1.4.0. -func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation { - a.collectionMode = collectionMode - return a -} - -func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation { - a.showTermDocCountError = &showTermDocCountError - return a -} - -func (a *TermsAggregation) IncludeTerms(terms ...string) *TermsAggregation { - a.includeTerms = append(a.includeTerms, terms...) - return a -} - -func (a *TermsAggregation) ExcludeTerms(terms ...string) *TermsAggregation { - a.excludeTerms = append(a.excludeTerms, terms...) - return a -} - -func (a *TermsAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "genders" : { - // "terms" : { "field" : "gender" } - // } - // } - // } - // This method returns only the { "terms" : { "field" : "gender" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["terms"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - // TermsBuilder - if a.size != nil && *a.size >= 0 { - opts["size"] = *a.size - } - if a.shardSize != nil && *a.shardSize >= 0 { - opts["shard_size"] = *a.shardSize - } - if a.requiredSize != nil && *a.requiredSize >= 0 { - opts["required_size"] = *a.requiredSize - } - if a.minDocCount != nil && *a.minDocCount >= 0 { - opts["min_doc_count"] = *a.minDocCount - } - if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 { - opts["shard_min_doc_count"] = *a.shardMinDocCount - } - if a.showTermDocCountError != nil { - opts["show_term_doc_count_error"] = *a.showTermDocCountError - } - if a.collectionMode != "" { - opts["collect_mode"] = a.collectionMode - } - if a.valueType != "" { - opts["value_type"] = a.valueType - } - if a.order != "" { - o := make(map[string]interface{}) - if a.orderAsc { - o[a.order] = "asc" - } else { - o[a.order] = "desc" - } - opts["order"] = o - } - if len(a.includeTerms) > 0 { - opts["include"] = a.includeTerms - } - if a.includePattern != "" { - if a.includeFlags == nil || *a.includeFlags == 0 { - opts["include"] = a.includePattern - } else { - p := make(map[string]interface{}) - p["pattern"] = a.includePattern - p["flags"] = *a.includeFlags - opts["include"] = p - } - } - if len(a.excludeTerms) > 0 { - opts["exclude"] = a.excludeTerms - } - if a.excludePattern != "" { - if a.excludeFlags == nil || *a.excludeFlags == 0 { - opts["exclude"] = a.excludePattern - } else { - p := make(map[string]interface{}) - p["pattern"] = a.excludePattern - p["flags"] = *a.excludeFlags - opts["exclude"] = p - } - } - if a.executionHint != "" { - opts["execution_hint"] = a.executionHint - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := 
make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms_test.go deleted file mode 100644 index e5f979333..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_bucket_terms_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestTermsAggregation(t *testing.T) { - agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc() - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestTermsAggregationWithSubAggregation(t *testing.T) { - subAgg := NewAvgAggregation().Field("height") - agg := NewTermsAggregation().Field("gender").Size(10). - OrderByAggregation("avg_height", false) - agg = agg.SubAggregation("avg_height", subAgg) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestTermsAggregationWithMultipleSubAggregation(t *testing.T) { - subAgg1 := NewAvgAggregation().Field("height") - subAgg2 := NewAvgAggregation().Field("width") - agg := NewTermsAggregation().Field("gender").Size(10). 
- OrderByAggregation("avg_height", false) - agg = agg.SubAggregation("avg_height", subAgg1) - agg = agg.SubAggregation("avg_width", subAgg2) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}},"avg_width":{"avg":{"field":"width"}}},"terms":{"field":"gender","order":{"avg_height":"desc"},"size":10}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestTermsAggregationWithMetaData(t *testing.T) { - agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc() - agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"terms":{"field":"gender","order":{"_term":"desc"},"size":10}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestTermsAggregationWithMissing(t *testing.T) { - agg := NewTermsAggregation().Field("gender").Size(10).Missing("n/a") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"terms":{"field":"gender","missing":"n/a","size":10}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg.go deleted file mode 100644 index 37ec2b7ad..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// AvgAggregation is a single-value metrics aggregation that computes -// the average of numeric values that are extracted from the -// aggregated documents. These values can be extracted either from -// specific numeric fields in the documents, or be generated by -// a provided script. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html -type AvgAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewAvgAggregation() *AvgAggregation { - return &AvgAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *AvgAggregation) Field(field string) *AvgAggregation { - a.field = field - return a -} - -func (a *AvgAggregation) Script(script *Script) *AvgAggregation { - a.script = script - return a -} - -func (a *AvgAggregation) Format(format string) *AvgAggregation { - a.format = format - return a -} - -func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
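The terms builder deleted above also supports exclude lists and missing-value buckets, which its tests exercise only separately. A minimal combined sketch, assuming the pre-removal vendored package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic" // import path assumed; vendored copy removed by this patch
)

func main() {
	// Term buckets with an explicit exclude list and a bucket for documents
	// that have no value in the field.
	agg := elastic.NewTermsAggregation().
		Field("gender").
		Size(5).
		Missing("n/a").
		ExcludeTerms("unknown")

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"terms":{"exclude":["unknown"],"field":"gender","missing":"n/a","size":5}}
}
```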
-func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation { - a.meta = metaData - return a -} - -func (a *AvgAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "avg_grade" : { "avg" : { "field" : "grade" } } - // } - // } - // This method returns only the { "avg" : { "field" : "grade" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["avg"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - - if a.format != "" { - opts["format"] = a.format - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg_test.go deleted file mode 100644 index c8539d12d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_avg_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestAvgAggregation(t *testing.T) { - agg := NewAvgAggregation().Field("grade") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"avg":{"field":"grade"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestAvgAggregationWithFormat(t *testing.T) { - agg := NewAvgAggregation().Field("grade").Format("000.0") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"avg":{"field":"grade","format":"000.0"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestAvgAggregationWithMetaData(t *testing.T) { - agg := NewAvgAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"avg":{"field":"grade"},"meta":{"name":"Oliver"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality.go deleted file mode 100644 index ebf247c79..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CardinalityAggregation is a single-value metrics aggregation that -// calculates an approximate count of distinct values. -// Values can be extracted either from specific fields in the document -// or generated by a script. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html -type CardinalityAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} - precisionThreshold *int64 - rehash *bool -} - -func NewCardinalityAggregation() *CardinalityAggregation { - return &CardinalityAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation { - a.field = field - return a -} - -func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation { - a.script = script - return a -} - -func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation { - a.format = format - return a -} - -func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation { - a.meta = metaData - return a -} - -func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation { - a.precisionThreshold = &threshold - return a -} - -func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation { - a.rehash = &rehash - return a -} - -func (a *CardinalityAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "author_count" : { - // "cardinality" : { "field" : "author" } - // } - // } - // } - // This method returns only the "cardinality" : { "field" : "author" } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["cardinality"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - - if a.format != "" { - opts["format"] = a.format - } - if a.precisionThreshold != nil { - opts["precision_threshold"] = *a.precisionThreshold - } - if a.rehash != nil { - opts["rehash"] = *a.rehash - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go deleted file mode 100644 index bccfa7aae..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
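The cardinality builder deleted above computes an approximate distinct count; per Elasticsearch's documentation, counts below precision_threshold are near-exact, and above it the HyperLogLog++ sketch trades accuracy for bounded memory. A minimal sketch under the same pre-removal assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic" // import path assumed; vendored copy removed by this patch
)

func main() {
	// A higher precision_threshold buys accuracy at the cost of memory.
	agg := elastic.NewCardinalityAggregation().
		Field("author.hash").
		PrecisionThreshold(1000)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"cardinality":{"field":"author.hash","precision_threshold":1000}}
}
```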
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestCardinalityAggregation(t *testing.T) {
- agg := NewCardinalityAggregation().Field("author.hash")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cardinality":{"field":"author.hash"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCardinalityAggregationWithOptions(t *testing.T) {
- agg := NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100).Rehash(true)
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cardinality":{"field":"author.hash","precision_threshold":100,"rehash":true}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCardinalityAggregationWithFormat(t *testing.T) {
- agg := NewCardinalityAggregation().Field("author.hash").Format("00000")
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cardinality":{"field":"author.hash","format":"00000"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestCardinalityAggregationWithMetaData(t *testing.T) {
- agg := NewCardinalityAggregation().Field("author.hash").Meta(map[string]interface{}{"name": "Oliver"})
- src, err := agg.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"cardinality":{"field":"author.hash"},"meta":{"name":"Oliver"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go
deleted file mode 100644
index 69447409c..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ExtendedStatsAggregation is a multi-value metrics aggregation that
-// computes stats over numeric values extracted from the aggregated documents.
-// These values can be extracted either from specific numeric fields
-// in the documents, or be generated by a provided script.
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html -type ExtendedStatsAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewExtendedStatsAggregation() *ExtendedStatsAggregation { - return &ExtendedStatsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation { - a.field = field - return a -} - -func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation { - a.script = script - return a -} - -func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation { - a.format = format - return a -} - -func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation { - a.meta = metaData - return a -} - -func (a *ExtendedStatsAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "grades_stats" : { "extended_stats" : { "field" : "grade" } } - // } - // } - // This method returns only the { "extended_stats" : { "field" : "grade" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["extended_stats"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go deleted file mode 100644 index 4a80693cf..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
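ExtendedStatsAggregation, removed above, follows the same values-source pattern: only field, script, and format feed the emitted fragment. A short sketch with the deleted tests' inputs:

    agg := elastic.NewExtendedStatsAggregation().Field("grade").Format("000.0")
    src, _ := agg.Source()
    data, _ := json.Marshal(src)
    // => {"extended_stats":{"field":"grade","format":"000.0"}}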
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestExtendedStatsAggregation(t *testing.T) { - agg := NewExtendedStatsAggregation().Field("grade") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"extended_stats":{"field":"grade"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestExtendedStatsAggregationWithFormat(t *testing.T) { - agg := NewExtendedStatsAggregation().Field("grade").Format("000.0") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"extended_stats":{"field":"grade","format":"000.0"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go deleted file mode 100644 index 647ba5139..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// GeoBoundsAggregation is a metric aggregation that computes the -// bounding box containing all geo_point values for a field. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html -type GeoBoundsAggregation struct { - field string - script *Script - wrapLongitude *bool - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewGeoBoundsAggregation() *GeoBoundsAggregation { - return &GeoBoundsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation { - a.field = field - return a -} - -func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation { - a.script = script - return a -} - -func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation { - a.wrapLongitude = &wrapLongitude - return a -} - -func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation { - a.meta = metaData - return a -} - -func (a *GeoBoundsAggregation) Source() (interface{}, error) { - // Example: - // { - // "query" : { - // "match" : { "business_type" : "shop" } - // }, - // "aggs" : { - // "viewport" : { - // "geo_bounds" : { - // "field" : "location" - // "wrap_longitude" : "true" - // } - // } - // } - // } - // - // This method returns only the { "geo_bounds" : { ... } } part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["geo_bounds"] = opts - - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.wrapLongitude != nil { - opts["wrap_longitude"] = *a.wrapLongitude - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go deleted file mode 100644 index 3096b8ee5..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestGeoBoundsAggregation(t *testing.T) { - agg := NewGeoBoundsAggregation().Field("location") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_bounds":{"field":"location"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoBoundsAggregationWithWrapLongitude(t *testing.T) { - agg := NewGeoBoundsAggregation().Field("location").WrapLongitude(true) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_bounds":{"field":"location","wrap_longitude":true}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoBoundsAggregationWithMetaData(t *testing.T) { - agg := NewGeoBoundsAggregation().Field("location").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_bounds":{"field":"location"},"meta":{"name":"Oliver"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max.go deleted file mode 100644 index 334cff020..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
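GeoBoundsAggregation, deleted just above, adds only the wrap_longitude flag, which controls whether the computed bounding box may overlap the international date line. A sketch mirroring its deleted test:

    agg := elastic.NewGeoBoundsAggregation().Field("location").WrapLongitude(true)
    src, _ := agg.Source()
    data, _ := json.Marshal(src)
    // => {"geo_bounds":{"field":"location","wrap_longitude":true}}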
- -package elastic - -// MaxAggregation is a single-value metrics aggregation that keeps track and -// returns the maximum value among the numeric values extracted from -// the aggregated documents. These values can be extracted either from -// specific numeric fields in the documents, or be generated by -// a provided script. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html -type MaxAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewMaxAggregation() *MaxAggregation { - return &MaxAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *MaxAggregation) Field(field string) *MaxAggregation { - a.field = field - return a -} - -func (a *MaxAggregation) Script(script *Script) *MaxAggregation { - a.script = script - return a -} - -func (a *MaxAggregation) Format(format string) *MaxAggregation { - a.format = format - return a -} - -func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation { - a.meta = metaData - return a -} -func (a *MaxAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "max_price" : { "max" : { "field" : "price" } } - // } - // } - // This method returns only the { "max" : { "field" : "price" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["max"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max_test.go deleted file mode 100644 index b5da00c19..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_max_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
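The max builder just removed (and the min builder coming up next) emit symmetric single-key fragments; only the outer key differs. A sketch with the deleted tests' inputs:

    agg := elastic.NewMaxAggregation().Field("price").Format("00000.00")
    src, _ := agg.Source()
    data, _ := json.Marshal(src)
    // => {"max":{"field":"price","format":"00000.00"}}
    // NewMinAggregation().Field("price") yields {"min":{"field":"price"}} the same way.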
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMaxAggregation(t *testing.T) { - agg := NewMaxAggregation().Field("price") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"max":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMaxAggregationWithFormat(t *testing.T) { - agg := NewMaxAggregation().Field("price").Format("00000.00") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"max":{"field":"price","format":"00000.00"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMaxAggregationWithMetaData(t *testing.T) { - agg := NewMaxAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"max":{"field":"price"},"meta":{"name":"Oliver"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min.go deleted file mode 100644 index f9e21f7a8..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MinAggregation is a single-value metrics aggregation that keeps track and -// returns the minimum value among numeric values extracted from the -// aggregated documents. These values can be extracted either from -// specific numeric fields in the documents, or be generated by a -// provided script. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html -type MinAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewMinAggregation() *MinAggregation { - return &MinAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *MinAggregation) Field(field string) *MinAggregation { - a.field = field - return a -} - -func (a *MinAggregation) Script(script *Script) *MinAggregation { - a.script = script - return a -} - -func (a *MinAggregation) Format(format string) *MinAggregation { - a.format = format - return a -} - -func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
-func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation { - a.meta = metaData - return a -} - -func (a *MinAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "min_price" : { "min" : { "field" : "price" } } - // } - // } - // This method returns only the { "min" : { "field" : "price" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["min"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min_test.go deleted file mode 100644 index 170650667..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_min_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMinAggregation(t *testing.T) { - agg := NewMinAggregation().Field("price") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"min":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMinAggregationWithFormat(t *testing.T) { - agg := NewMinAggregation().Field("price").Format("00000.00") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"min":{"field":"price","format":"00000.00"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMinAggregationWithMetaData(t *testing.T) { - agg := NewMinAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"min":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go deleted file mode 100644 index c0b3aa663..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. 
All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// PercentileRanksAggregation -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html -type PercentileRanksAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} - values []float64 - compression *float64 - estimator string -} - -func NewPercentileRanksAggregation() *PercentileRanksAggregation { - return &PercentileRanksAggregation{ - subAggregations: make(map[string]Aggregation), - values: make([]float64, 0), - } -} - -func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation { - a.field = field - return a -} - -func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation { - a.script = script - return a -} - -func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation { - a.format = format - return a -} - -func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation { - a.meta = metaData - return a -} - -func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation { - a.values = append(a.values, values...) - return a -} - -func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation { - a.compression = &compression - return a -} - -func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation { - a.estimator = estimator - return a -} - -func (a *PercentileRanksAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "load_time_outlier" : { - // "percentile_ranks" : { - // "field" : "load_time" - // "values" : [15, 30] - // } - // } - // } - // } - // This method returns only the - // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } } - // part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["percentile_ranks"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if len(a.values) > 0 { - opts["values"] = a.values - } - if a.compression != nil { - opts["compression"] = *a.compression - } - if a.estimator != "" { - opts["estimator"] = a.estimator - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go deleted file mode 100644 index df4b7c4a3..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestPercentileRanksAggregation(t *testing.T) { - agg := NewPercentileRanksAggregation().Field("load_time") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"percentile_ranks":{"field":"load_time"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPercentileRanksAggregationWithCustomValues(t *testing.T) { - agg := NewPercentileRanksAggregation().Field("load_time").Values(15, 30) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"percentile_ranks":{"field":"load_time","values":[15,30]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPercentileRanksAggregationWithFormat(t *testing.T) { - agg := NewPercentileRanksAggregation().Field("load_time").Format("000.0") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"percentile_ranks":{"field":"load_time","format":"000.0"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPercentileRanksAggregationWithMetaData(t *testing.T) { - agg := NewPercentileRanksAggregation().Field("load_time").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"percentile_ranks":{"field":"load_time"}}` - if got != expected 
{ - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles.go deleted file mode 100644 index b1695ebb3..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// PercentilesAggregation -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html -type PercentilesAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} - percentiles []float64 - compression *float64 - estimator string -} - -func NewPercentilesAggregation() *PercentilesAggregation { - return &PercentilesAggregation{ - subAggregations: make(map[string]Aggregation), - percentiles: make([]float64, 0), - } -} - -func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation { - a.field = field - return a -} - -func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation { - a.script = script - return a -} - -func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation { - a.format = format - return a -} - -func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation { - a.meta = metaData - return a -} - -func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation { - a.percentiles = append(a.percentiles, percentiles...) - return a -} - -func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation { - a.compression = &compression - return a -} - -func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation { - a.estimator = estimator - return a -} - -func (a *PercentilesAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "load_time_outlier" : { - // "percentiles" : { - // "field" : "load_time" - // } - // } - // } - // } - // This method returns only the - // { "percentiles" : { "field" : "load_time" } } - // part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["percentiles"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if len(a.percentiles) > 0 { - opts["percents"] = a.percentiles - } - if a.compression != nil { - opts["compression"] = *a.compression - } - if a.estimator != "" { - opts["estimator"] = a.estimator - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go deleted file mode 100644 index da2d2055e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestPercentilesAggregation(t *testing.T) { - agg := NewPercentilesAggregation().Field("price") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"percentiles":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPercentilesAggregationWithCustomPercents(t *testing.T) { - agg := NewPercentilesAggregation().Field("price").Percentiles(0.2, 0.5, 0.9) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"percentiles":{"field":"price","percents":[0.2,0.5,0.9]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPercentilesAggregationWithFormat(t *testing.T) { - agg := NewPercentilesAggregation().Field("price").Format("00000.00") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"percentiles":{"field":"price","format":"00000.00"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPercentilesAggregationWithMetaData(t *testing.T) { - agg := NewPercentilesAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"percentiles":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} 
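Both percentile builders removed above accumulate their numeric arguments across calls (note the append in Values and Percentiles), and PercentilesAggregation serializes its list under the "percents" key. A sketch combining the two, with the deleted tests' inputs:

    pct := elastic.NewPercentilesAggregation().Field("price").Percentiles(0.2, 0.5, 0.9)
    // Source() + json.Marshal => {"percentiles":{"field":"price","percents":[0.2,0.5,0.9]}}

    ranks := elastic.NewPercentileRanksAggregation().Field("load_time").Values(15, 30)
    // Source() + json.Marshal => {"percentile_ranks":{"field":"load_time","values":[15,30]}}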
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats.go deleted file mode 100644 index 42da9c854..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// StatsAggregation is a multi-value metrics aggregation that computes stats -// over numeric values extracted from the aggregated documents. -// These values can be extracted either from specific numeric fields -// in the documents, or be generated by a provided script. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html -type StatsAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewStatsAggregation() *StatsAggregation { - return &StatsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *StatsAggregation) Field(field string) *StatsAggregation { - a.field = field - return a -} - -func (a *StatsAggregation) Script(script *Script) *StatsAggregation { - a.script = script - return a -} - -func (a *StatsAggregation) Format(format string) *StatsAggregation { - a.format = format - return a -} - -func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation { - a.meta = metaData - return a -} - -func (a *StatsAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "grades_stats" : { "stats" : { "field" : "grade" } } - // } - // } - // This method returns only the { "stats" : { "field" : "grade" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["stats"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats_test.go deleted file mode 100644 index 0ea0b175d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_stats_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
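Every metrics builder in this batch shares the SubAggregation hook visible in stats above: children are serialized under a sibling "aggregations" key, not inside the metric body. Elasticsearch itself only honors sub-aggregations under bucket aggregations, so this mainly documents what the builder emits; a sketch with illustrative names:

    agg := elastic.NewStatsAggregation().Field("grade").
    	SubAggregation("top_grade", elastic.NewMaxAggregation().Field("grade"))
    src, _ := agg.Source()
    data, _ := json.Marshal(src)
    // => {"aggregations":{"top_grade":{"max":{"field":"grade"}}},"stats":{"field":"grade"}}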
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestStatsAggregation(t *testing.T) { - agg := NewStatsAggregation().Field("grade") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"stats":{"field":"grade"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestStatsAggregationWithFormat(t *testing.T) { - agg := NewStatsAggregation().Field("grade").Format("0000.0") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"stats":{"field":"grade","format":"0000.0"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestStatsAggregationWithMetaData(t *testing.T) { - agg := NewStatsAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"stats":{"field":"grade"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum.go deleted file mode 100644 index 6f783e7e1..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SumAggregation is a single-value metrics aggregation that sums up -// numeric values that are extracted from the aggregated documents. -// These values can be extracted either from specific numeric fields -// in the documents, or be generated by a provided script. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html -type SumAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewSumAggregation() *SumAggregation { - return &SumAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *SumAggregation) Field(field string) *SumAggregation { - a.field = field - return a -} - -func (a *SumAggregation) Script(script *Script) *SumAggregation { - a.script = script - return a -} - -func (a *SumAggregation) Format(format string) *SumAggregation { - a.format = format - return a -} - -func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation { - a.meta = metaData - return a -} - -func (a *SumAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "intraday_return" : { "sum" : { "field" : "change" } } - // } - // } - // This method returns only the { "sum" : { "field" : "change" } } part. 
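Picking up the example in the comment above, the sum fragment comes out of the builder unchanged; reusing the shared scaffolding:

    agg := elastic.NewSumAggregation().Field("change")
    src, _ := agg.Source()
    data, _ := json.Marshal(src)
    // => {"sum":{"field":"change"}}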
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["sum"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum_test.go deleted file mode 100644 index 737808931..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_sum_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestSumAggregation(t *testing.T) { - agg := NewSumAggregation().Field("price") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"sum":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSumAggregationWithFormat(t *testing.T) { - agg := NewSumAggregation().Field("price").Format("00000.00") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"sum":{"field":"price","format":"00000.00"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSumAggregationWithMetaData(t *testing.T) { - agg := NewSumAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"sum":{"field":"price"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits.go deleted file mode 100644 index c017abb98..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TopHitsAggregation keeps track of the most relevant document -// being aggregated. This aggregator is intended to be used as a -// sub aggregator, so that the top matching documents -// can be aggregated per bucket. 
-//
-// It can effectively be used to group result sets by certain fields via
-// a bucket aggregator. One or more bucket aggregators determine by
-// which properties a result set gets sliced.
-//
-// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
-type TopHitsAggregation struct {
- searchSource *SearchSource
-}
-
-func NewTopHitsAggregation() *TopHitsAggregation {
- return &TopHitsAggregation{
- searchSource: NewSearchSource(),
- }
-}
-
-func (a *TopHitsAggregation) From(from int) *TopHitsAggregation {
- a.searchSource = a.searchSource.From(from)
- return a
-}
-
-func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation {
- a.searchSource = a.searchSource.Size(size)
- return a
-}
-
-func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.TrackScores(trackScores)
- return a
-}
-
-func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.Explain(explain)
- return a
-}
-
-func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.Version(version)
- return a
-}
-
-func (a *TopHitsAggregation) NoFields() *TopHitsAggregation {
- a.searchSource = a.searchSource.NoFields()
- return a
-}
-
-func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.FetchSource(fetchSource)
- return a
-}
-
-func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation {
- a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
- return a
-}
-
-func (a *TopHitsAggregation) FieldDataFields(fieldDataFields ...string) *TopHitsAggregation {
- a.searchSource = a.searchSource.FieldDataFields(fieldDataFields...)
- return a
-}
-
-func (a *TopHitsAggregation) FieldDataField(fieldDataField string) *TopHitsAggregation {
- a.searchSource = a.searchSource.FieldDataField(fieldDataField)
- return a
-}
-
-func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation {
- a.searchSource = a.searchSource.ScriptFields(scriptFields...)
- return a
-}
-
-func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation {
- a.searchSource = a.searchSource.ScriptField(scriptField)
- return a
-}
-
-func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation {
- a.searchSource = a.searchSource.Sort(field, ascending)
- return a
-}
-
-func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation {
- a.searchSource = a.searchSource.SortWithInfo(info)
- return a
-}
-
-func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation {
- a.searchSource = a.searchSource.SortBy(sorter...)
- return a
-}
-
-func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation {
- a.searchSource = a.searchSource.Highlight(highlight)
- return a
-}
-
-func (a *TopHitsAggregation) Highlighter() *Highlight {
- return a.searchSource.Highlighter()
-}
-
-func (a *TopHitsAggregation) Source() (interface{}, error) {
- // Example:
- // {
- // "aggs": {
- // "top_tag_hits": {
- // "top_hits": {
- // "sort": [
- // {
- // "last_activity_date": {
- // "order": "desc"
- // }
- // }
- // ],
- // "_source": {
- // "include": [
- // "title"
- // ]
- // },
- // "size" : 1
- // }
- // }
- // }
- // }
- // This method returns only the { "top_hits" : { ... } } part.
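Because TopHitsAggregation delegates to an embedded SearchSource, its fragment is a full search body nested under "top_hits". A sketch mirroring the deleted test below (descending sort, one hit, title only):

    fsc := elastic.NewFetchSourceContext(true).Include("title")
    agg := elastic.NewTopHitsAggregation().
    	Sort("last_activity_date", false). // false = descending
    	FetchSourceContext(fsc).
    	Size(1)
    src, _ := agg.Source()
    data, _ := json.Marshal(src)
    // => {"top_hits":{"_source":{"excludes":[],"includes":["title"]},"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}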
- - source := make(map[string]interface{}) - src, err := a.searchSource.Source() - if err != nil { - return nil, err - } - source["top_hits"] = src - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go deleted file mode 100644 index 2634a22b6..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestTopHitsAggregation(t *testing.T) { - fsc := NewFetchSourceContext(true).Include("title") - agg := NewTopHitsAggregation(). - Sort("last_activity_date", false). - FetchSourceContext(fsc). - Size(1) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"top_hits":{"_source":{"excludes":[],"includes":["title"]},"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count.go deleted file mode 100644 index b2e3e8241..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ValueCountAggregation is a single-value metrics aggregation that counts -// the number of values that are extracted from the aggregated documents. -// These values can be extracted either from specific fields in the documents, -// or be generated by a provided script. Typically, this aggregator will be -// used in conjunction with other single-value aggregations. -// For example, when computing the avg one might be interested in the -// number of values the average is computed over. -// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html -type ValueCountAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewValueCountAggregation() *ValueCountAggregation { - return &ValueCountAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation { - a.field = field - return a -} - -func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation { - a.script = script - return a -} - -func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation { - a.format = format - return a -} - -func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
-func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation { - a.meta = metaData - return a -} - -func (a *ValueCountAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "grades_count" : { "value_count" : { "field" : "grade" } } - // } - // } - // This method returns only the { "value_count" : { "field" : "grade" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["value_count"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go deleted file mode 100644 index eee189b51..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestValueCountAggregation(t *testing.T) { - agg := NewValueCountAggregation().Field("grade") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"value_count":{"field":"grade"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestValueCountAggregationWithFormat(t *testing.T) { - // Format comes with 1.5.0+ - agg := NewValueCountAggregation().Field("grade").Format("0000.0") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"value_count":{"field":"grade","format":"0000.0"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestValueCountAggregationWithMetaData(t *testing.T) { - agg := NewValueCountAggregation().Field("grade") - agg = agg.Meta(map[string]interface{}{"name": "Oliver"}) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"meta":{"name":"Oliver"},"value_count":{"field":"grade"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go deleted file mode 100644 index 5cd93d5cc..000000000 --- 
a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// AvgBucketAggregation is a sibling pipeline aggregation which calculates -// the (mean) average value of a specified metric in a sibling aggregation. -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-avg-bucket-aggregation.html -type AvgBucketAggregation struct { - format string - gapPolicy string - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPaths []string -} - -// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation. -func NewAvgBucketAggregation() *AvgBucketAggregation { - return &AvgBucketAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPaths: make([]string, 0), - } -} - -func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation { - a.gapPolicy = "skip" - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *AvgBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgBucketAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
- return a -} - -func (a *AvgBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["avg_bucket"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go deleted file mode 100644 index 0e6509ecb..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestAvgBucketAggregation(t *testing.T) { - agg := NewAvgBucketAggregation().BucketsPath("the_sum").GapPolicy("skip") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"avg_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go deleted file mode 100644 index 44d6bc624..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// BucketScriptAggregation is a parent pipeline aggregation which executes -// a script which can perform per bucket computations on specified metrics -// in the parent multi-bucket aggregation. The specified metric must be -// numeric and the script must return a numeric value. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html -type BucketScriptAggregation struct { - format string - gapPolicy string - script *Script - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPathsMap map[string]string -} - -// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation. 
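// A minimal usage sketch: BucketsPathsMap replaces the whole path map at once,
// while AddBucketsPath (below) adds one named path at a time; either way the
// entries land under "buckets_path" in Source(). The path names and script
// here are borrowed from the test further down:
//
//	paths := map[string]string{"tShirtSales": "t-shirts>sales", "totalSales": "total_sales"}
//	agg := NewBucketScriptAggregation().
//		BucketsPathsMap(paths).
//		Script(NewScript("tShirtSales / totalSales * 100"))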
-func NewBucketScriptAggregation() *BucketScriptAggregation { - return &BucketScriptAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPathsMap: make(map[string]string), - } -} - -func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation { - a.gapPolicy = "skip" - return a -} - -// Script is the script to run. -func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation { - a.script = script - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *BucketScriptAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketScriptAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation { - a.meta = metaData - return a -} - -// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. -func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation { - a.bucketsPathsMap = bucketsPathsMap - return a -} - -// AddBucketsPath adds a bucket path to use for this pipeline aggregator. -func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation { - if a.bucketsPathsMap == nil { - a.bucketsPathsMap = make(map[string]string) - } - a.bucketsPathsMap[name] = path - return a -} - -func (a *BucketScriptAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["bucket_script"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - params["script"] = src - } - - // Add buckets paths - if len(a.bucketsPathsMap) > 0 { - params["buckets_path"] = a.bucketsPathsMap - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go deleted file mode 100644 index 7f4d966d0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestBucketScriptAggregation(t *testing.T) { - agg := NewBucketScriptAggregation(). - AddBucketsPath("tShirtSales", "t-shirts>sales"). - AddBucketsPath("totalSales", "total_sales"). - Script(NewScript("tShirtSales / totalSales * 100")) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts\u003esales","totalSales":"total_sales"},"script":"tShirtSales / totalSales * 100"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go deleted file mode 100644 index ce17ec1f6..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// BucketSelectorAggregation is a parent pipeline aggregation which -// determines whether the current bucket will be retained in the parent -// multi-bucket aggregation. The specific metric must be numeric and -// the script must return a boolean value. If the script language is -// expression then a numeric return value is permitted. In this case 0.0 -// will be evaluated as false and all other values will evaluate to true. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-selector-aggregation.html -type BucketSelectorAggregation struct { - format string - gapPolicy string - script *Script - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPathsMap map[string]string -} - -// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation. -func NewBucketSelectorAggregation() *BucketSelectorAggregation { - return &BucketSelectorAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPathsMap: make(map[string]string), - } -} - -func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation { - a.gapPolicy = "skip" - return a -} - -// Script is the script to run. -func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation { - a.script = script - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. 
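// A minimal usage sketch: since bucket_selector is a parent pipeline
// aggregation, it is attached as a sub-aggregation of the multi-bucket
// aggregation it filters. Assuming the date_histogram/sum setup used by the
// integration tests at the end of this package:
//
//	h := NewDateHistogramAggregation().Field("time").Interval("month")
//	h = h.SubAggregation("total_sales", NewSumAggregation().Field("price"))
//	h = h.SubAggregation("big_months", NewBucketSelectorAggregation().
//		AddBucketsPath("totalSales", "total_sales").
//		Script(NewScript("totalSales >= 1000")))
//
// Only monthly buckets whose total_sales metric reaches 1000 are retained.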
-func (a *BucketSelectorAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketSelectorAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation { - a.meta = metaData - return a -} - -// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. -func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation { - a.bucketsPathsMap = bucketsPathsMap - return a -} - -// AddBucketsPath adds a bucket path to use for this pipeline aggregator. -func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation { - if a.bucketsPathsMap == nil { - a.bucketsPathsMap = make(map[string]string) - } - a.bucketsPathsMap[name] = path - return a -} - -func (a *BucketSelectorAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["bucket_selector"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - params["script"] = src - } - - // Add buckets paths - if len(a.bucketsPathsMap) > 0 { - params["buckets_path"] = a.bucketsPathsMap - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go deleted file mode 100644 index d4e0206de..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestBucketSelectorAggregation(t *testing.T) { - agg := NewBucketSelectorAggregation(). - AddBucketsPath("totalSales", "total_sales"). 
- Script(NewScript("totalSales >= 1000")) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":"totalSales \u003e= 1000"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go deleted file mode 100644 index 018eb918f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CumulativeSumAggregation is a parent pipeline aggregation which calculates -// the cumulative sum of a specified metric in a parent histogram (or date_histogram) -// aggregation. The specified metric must be numeric and the enclosing -// histogram must have min_doc_count set to 0 (default for histogram aggregations). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html -type CumulativeSumAggregation struct { - format string - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPaths []string -} - -// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation. -func NewCumulativeSumAggregation() *CumulativeSumAggregation { - return &CumulativeSumAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPaths: make([]string, 0), - } -} - -func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation { - a.format = format - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *CumulativeSumAggregation) SubAggregation(name string, subAggregation Aggregation) *CumulativeSumAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
- return a -} - -func (a *CumulativeSumAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["cumulative_sum"] = params - - if a.format != "" { - params["format"] = a.format - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go deleted file mode 100644 index a4023d84e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestCumulativeSumAggregation(t *testing.T) { - agg := NewCumulativeSumAggregation().BucketsPath("sales") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"cumulative_sum":{"buckets_path":"sales"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative.go deleted file mode 100644 index 66611f46e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// DerivativeAggregation is a parent pipeline aggregation which calculates -// the derivative of a specified metric in a parent histogram (or date_histogram) -// aggregation. The specified metric must be numeric and the enclosing -// histogram must have min_doc_count set to 0 (default for histogram aggregations). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html -type DerivativeAggregation struct { - format string - gapPolicy string - unit string - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPaths []string -} - -// NewDerivativeAggregation creates and initializes a new DerivativeAggregation. 
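// A minimal usage sketch: like cumulative_sum above, a derivative is attached
// as a sub-aggregation of the enclosing histogram. GapSkip and Unit are the
// builder options defined below; the field names follow the integration tests:
//
//	h := NewDateHistogramAggregation().Field("time").Interval("month")
//	h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
//	h = h.SubAggregation("sales_deriv",
//		NewDerivativeAggregation().BucketsPath("sales").GapSkip().Unit("1d"))
//
// Source() then emits {"derivative":{"buckets_path":"sales","gap_policy":"skip","unit":"1d"}}.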
-func NewDerivativeAggregation() *DerivativeAggregation { - return &DerivativeAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPaths: make([]string, 0), - } -} - -func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation { - a.gapPolicy = "skip" - return a -} - -// Unit sets the unit provided, e.g. "1d" or "1y". -// It is only useful when calculating the derivative using a date_histogram. -func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation { - a.unit = unit - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *DerivativeAggregation) SubAggregation(name string, subAggregation Aggregation) *DerivativeAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -func (a *DerivativeAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["derivative"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.unit != "" { - params["unit"] = a.unit - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go deleted file mode 100644 index 1d2ec2d38..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestDerivativeAggregation(t *testing.T) { - agg := NewDerivativeAggregation().BucketsPath("sales") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"derivative":{"buckets_path":"sales"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go deleted file mode 100644 index da6f9ef36..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MaxBucketAggregation is a sibling pipeline aggregation which identifies -// the bucket(s) with the maximum value of a specified metric in a sibling -// aggregation and outputs both the value and the key(s) of the bucket(s). -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-max-bucket-aggregation.html -type MaxBucketAggregation struct { - format string - gapPolicy string - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPaths []string -} - -// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation. -func NewMaxBucketAggregation() *MaxBucketAggregation { - return &MaxBucketAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPaths: make([]string, 0), - } -} - -func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation { - a.gapPolicy = "skip" - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *MaxBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxBucketAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
- return a -} - -func (a *MaxBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["max_bucket"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go deleted file mode 100644 index 8bdde8fcd..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMaxBucketAggregation(t *testing.T) { - agg := NewMaxBucketAggregation().BucketsPath("the_sum").GapPolicy("skip") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"max_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go deleted file mode 100644 index 325f00f03..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MinBucketAggregation is a sibling pipeline aggregation which identifies -// the bucket(s) with the maximum value of a specified metric in a sibling -// aggregation and outputs both the value and the key(s) of the bucket(s). -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-min-bucket-aggregation.html -type MinBucketAggregation struct { - format string - gapPolicy string - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPaths []string -} - -// NewMinBucketAggregation creates and initializes a new MinBucketAggregation. 
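// A minimal usage sketch: max_bucket (and min_bucket below) are sibling
// pipeline aggregations, so they sit next to the histogram rather than inside
// it. Assuming a top-level "sales_per_month" date_histogram with a "sales"
// sum, as in the integration tests at the end of this package:
//
//	builder = builder.Aggregation("best_month",
//		NewMaxBucketAggregation().BucketsPath("sales_per_month>sales"))
//	builder = builder.Aggregation("worst_month",
//		NewMinBucketAggregation().BucketsPath("sales_per_month>sales"))
//
// Each reports the extreme value together with the key(s) of the bucket(s) holding it.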
-func NewMinBucketAggregation() *MinBucketAggregation { - return &MinBucketAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPaths: make([]string, 0), - } -} - -func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation { - a.gapPolicy = "skip" - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *MinBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MinBucketAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -func (a *MinBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["min_bucket"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go deleted file mode 100644 index 86fc9cd7f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMinBucketAggregation(t *testing.T) { - agg := NewMinBucketAggregation().BucketsPath("sales_per_month>sales").GapPolicy("skip") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"min_bucket":{"buckets_path":"sales_per_month\u003esales","gap_policy":"skip"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go deleted file mode 100644 index 021144ddc..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MovAvgAggregation operates on a series of data. It will slide a window -// across the data and emit the average value of that window. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html -type MovAvgAggregation struct { - format string - gapPolicy string - model MovAvgModel - window *int - predict *int - minimize *bool - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPaths []string -} - -// NewMovAvgAggregation creates and initializes a new MovAvgAggregation. -func NewMovAvgAggregation() *MovAvgAggregation { - return &MovAvgAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPaths: make([]string, 0), - } -} - -func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation { - a.gapPolicy = "skip" - return a -} - -// Model is used to define what type of moving average you want to use -// in the series. -func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation { - a.model = model - return a -} - -// Window sets the window size for the moving average. This window will -// "slide" across the series, and the values inside that window will -// be used to calculate the moving avg value. -func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation { - a.window = &window - return a -} - -// Predict sets the number of predictions that should be returned. -// Each prediction will be spaced at the intervals in the histogram. -// E.g. a predict of 2 will return two new buckets at the end of the -// histogram with the predicted values. 
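// A minimal usage sketch: a moving average over a sliding window of 30
// buckets of "the_sum", asking for 2 predicted buckets beyond the end of the
// histogram (the values are arbitrary):
//
//	agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Predict(2)
//
// Source() serializes this to
// {"moving_avg":{"buckets_path":"the_sum","predict":2,"window":30}}.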
-func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation { - a.predict = &numPredictions - return a -} - -// Minimize determines if the model should be fit to the data using a -// cost minimizing algorithm. -func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation { - a.minimize = &minimize - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *MovAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *MovAvgAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -func (a *MovAvgAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["moving_avg"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.model != nil { - params["model"] = a.model.Name() - settings := a.model.Settings() - if len(settings) > 0 { - params["settings"] = settings - } - } - if a.window != nil { - params["window"] = *a.window - } - if a.predict != nil { - params["predict"] = *a.predict - } - if a.minimize != nil { - params["minimize"] = *a.minimize - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} - -// -- Models for moving averages -- -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_models - -// MovAvgModel specifies the model to use with the MovAvgAggregation. -type MovAvgModel interface { - Name() string - Settings() map[string]interface{} -} - -// -- EWMA -- - -// EWMAMovAvgModel calculates an exponentially weighted moving average. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted -type EWMAMovAvgModel struct { - alpha *float64 -} - -// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel. -func NewEWMAMovAvgModel() *EWMAMovAvgModel { - return &EWMAMovAvgModel{} -} - -// Alpha controls the smoothing of the data. Alpha = 1 retains no memory -// of past values (e.g. a random walk), while alpha = 0 retains infinite -// memory of past values (e.g. the series mean). Useful values are somewhere -// in between. Defaults to 0.5. -func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel { - m.alpha = &alpha - return m -} - -// Name of the model. -func (m *EWMAMovAvgModel) Name() string { - return "ewma" -} - -// Settings of the model. 
-func (m *EWMAMovAvgModel) Settings() map[string]interface{} { - settings := make(map[string]interface{}) - if m.alpha != nil { - settings["alpha"] = *m.alpha - } - return settings -} - -// -- Holt linear -- - -// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear -type HoltLinearMovAvgModel struct { - alpha *float64 - beta *float64 -} - -// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel. -func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel { - return &HoltLinearMovAvgModel{} -} - -// Alpha controls the smoothing of the data. Alpha = 1 retains no memory -// of past values (e.g. a random walk), while alpha = 0 retains infinite -// memory of past values (e.g. the series mean). Useful values are somewhere -// in between. Defaults to 0.5. -func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel { - m.alpha = &alpha - return m -} - -// Beta is equivalent to Alpha but controls the smoothing of the trend -// instead of the data. -func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel { - m.beta = &beta - return m -} - -// Name of the model. -func (m *HoltLinearMovAvgModel) Name() string { - return "holt" -} - -// Settings of the model. -func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} { - settings := make(map[string]interface{}) - if m.alpha != nil { - settings["alpha"] = *m.alpha - } - if m.beta != nil { - settings["beta"] = *m.beta - } - return settings -} - -// -- Holt Winters -- - -// HoltWintersMovAvgModel calculates a triple exponential weighted moving average. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters -type HoltWintersMovAvgModel struct { - alpha *float64 - beta *float64 - gamma *float64 - period *int - seasonalityType string - pad *bool -} - -// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel. -func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel { - return &HoltWintersMovAvgModel{} -} - -// Alpha controls the smoothing of the data. Alpha = 1 retains no memory -// of past values (e.g. a random walk), while alpha = 0 retains infinite -// memory of past values (e.g. the series mean). Useful values are somewhere -// in between. Defaults to 0.5. -func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel { - m.alpha = &alpha - return m -} - -// Beta is equivalent to Alpha but controls the smoothing of the trend -// instead of the data. -func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel { - m.beta = &beta - return m -} - -func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel { - m.gamma = &gamma - return m -} - -func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel { - m.period = &period - return m -} - -func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel { - m.seasonalityType = typ - return m -} - -func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel { - m.pad = &pad - return m -} - -// Name of the model. -func (m *HoltWintersMovAvgModel) Name() string { - return "holt_winters" -} - -// Settings of the model. 
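// A minimal usage sketch: a fully configured Holt-Winters model attached to a
// moving average (a condensed form of the options exercised by the tests
// below; the numbers are arbitrary):
//
//	m := NewHoltWintersMovAvgModel().Alpha(0.5).Beta(0.4).Gamma(0.3).Period(7).Pad(true)
//	agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(m)
//
// Source() records the model as "model":"holt_winters" and gathers the
// non-empty options under "settings".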
-func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} { - settings := make(map[string]interface{}) - if m.alpha != nil { - settings["alpha"] = *m.alpha - } - if m.beta != nil { - settings["beta"] = *m.beta - } - if m.gamma != nil { - settings["gamma"] = *m.gamma - } - if m.period != nil { - settings["period"] = *m.period - } - if m.pad != nil { - settings["pad"] = *m.pad - } - if m.seasonalityType != "" { - settings["type"] = m.seasonalityType - } - return settings -} - -// -- Linear -- - -// LinearMovAvgModel calculates a linearly weighted moving average, such -// that older values are linearly less important. "Time" is determined -// by position in collection. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_linear -type LinearMovAvgModel struct { -} - -// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel. -func NewLinearMovAvgModel() *LinearMovAvgModel { - return &LinearMovAvgModel{} -} - -// Name of the model. -func (m *LinearMovAvgModel) Name() string { - return "linear" -} - -// Settings of the model. -func (m *LinearMovAvgModel) Settings() map[string]interface{} { - return nil -} - -// -- Simple -- - -// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_simple -type SimpleMovAvgModel struct { -} - -// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel. -func NewSimpleMovAvgModel() *SimpleMovAvgModel { - return &SimpleMovAvgModel{} -} - -// Name of the model. -func (m *SimpleMovAvgModel) Name() string { - return "simple" -} - -// Settings of the model. -func (m *SimpleMovAvgModel) Settings() map[string]interface{} { - return nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go deleted file mode 100644 index e17c1c0a0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMovAvgAggregation(t *testing.T) { - agg := NewMovAvgAggregation().BucketsPath("the_sum") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"moving_avg":{"buckets_path":"the_sum"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMovAvgAggregationWithSimpleModel(t *testing.T) { - agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewSimpleMovAvgModel()) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"moving_avg":{"buckets_path":"the_sum","model":"simple","window":30}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMovAvgAggregationWithLinearModel(t *testing.T) { - agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewLinearMovAvgModel()) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"moving_avg":{"buckets_path":"the_sum","model":"linear","window":30}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMovAvgAggregationWithEWMAModel(t *testing.T) { - agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewEWMAMovAvgModel().Alpha(0.5)) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"moving_avg":{"buckets_path":"the_sum","model":"ewma","settings":{"alpha":0.5},"window":30}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMovAvgAggregationWithHoltLinearModel(t *testing.T) { - agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30). - Model(NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4)) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"moving_avg":{"buckets_path":"the_sum","model":"holt","settings":{"alpha":0.5,"beta":0.4},"window":30}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMovAvgAggregationWithHoltWintersModel(t *testing.T) { - agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Predict(10).Minimize(true). 
- Model(NewHoltWintersMovAvgModel().Alpha(0.5).Beta(0.4).Gamma(0.3).Period(7).Pad(true)) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"moving_avg":{"buckets_path":"the_sum","minimize":true,"model":"holt_winters","predict":10,"settings":{"alpha":0.5,"beta":0.4,"gamma":0.3,"pad":true,"period":7},"window":30}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMovAvgAggregationWithSubAggs(t *testing.T) { - agg := NewMovAvgAggregation().BucketsPath("the_sum") - agg = agg.SubAggregation("avg_sum", NewAvgAggregation().Field("height")) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"aggregations":{"avg_sum":{"avg":{"field":"height"}}},"moving_avg":{"buckets_path":"the_sum"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go deleted file mode 100644 index db81d3cf4..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SerialDiffAggregation implements serial differencing. -// Serial differencing is a technique where values in a time series are -// subtracted from itself at different time lags or periods. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html -type SerialDiffAggregation struct { - format string - gapPolicy string - lag *int - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPaths []string -} - -// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation. -func NewSerialDiffAggregation() *SerialDiffAggregation { - return &SerialDiffAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPaths: make([]string, 0), - } -} - -func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation { - a.gapPolicy = "skip" - return a -} - -// Lag specifies the historical bucket to subtract from the current value. -// E.g. a lag of 7 will subtract the current value from the value 7 buckets -// ago. Lag must be a positive, non-zero integer. 
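// A minimal usage sketch: differencing each bucket against the value seven
// buckets earlier, e.g. to strip weekly seasonality from daily buckets (the
// lag and gap policy are arbitrary choices):
//
//	agg := NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7).GapInsertZeros()
//
// Source() emits {"serial_diff":{"buckets_path":"the_sum","gap_policy":"insert_zeros","lag":7}}.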
-func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation { - a.lag = &lag - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *SerialDiffAggregation) SubAggregation(name string, subAggregation Aggregation) *SerialDiffAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -func (a *SerialDiffAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["serial_diff"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.lag != nil { - params["lag"] = *a.lag - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go deleted file mode 100644 index 17e512c5d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestSerialDiffAggregation(t *testing.T) { - agg := NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7) - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"serial_diff":{"buckets_path":"the_sum","lag":7}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go deleted file mode 100644 index 16ef64986..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -// SumBucketAggregation is a sibling pipeline aggregation which calculates -// the sum across all buckets of a specified metric in a sibling aggregation. -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-sum-bucket-aggregation.html -type SumBucketAggregation struct { - format string - gapPolicy string - - subAggregations map[string]Aggregation - meta map[string]interface{} - bucketsPaths []string -} - -// NewSumBucketAggregation creates and initializes a new SumBucketAggregation. -func NewSumBucketAggregation() *SumBucketAggregation { - return &SumBucketAggregation{ - subAggregations: make(map[string]Aggregation), - bucketsPaths: make([]string, 0), - } -} - -func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation { - a.gapPolicy = "skip" - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *SumBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *SumBucketAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
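// A minimal usage sketch: a sibling sum_bucket totalling the "sales" metric
// across all buckets of a "sales_per_month" histogram (names borrowed from
// the integration tests at the end of this package):
//
//	agg := NewSumBucketAggregation().BucketsPath("sales_per_month>sales")
//
// With a single path, "buckets_path" serializes as a string; passing several
// paths to BucketsPath makes Source() emit a JSON array instead.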
- return a -} - -func (a *SumBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["sum_bucket"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go deleted file mode 100644 index a1c84026d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestSumBucketAggregation(t *testing.T) { - agg := NewSumBucketAggregation().BucketsPath("the_sum") - src, err := agg.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"sum_bucket":{"buckets_path":"the_sum"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_test.go deleted file mode 100644 index be6bbfc87..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_pipeline_test.go +++ /dev/null @@ -1,1000 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "testing" - -func TestAggsIntegrationAvgBucket(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). 
- Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("sales", NewSumAggregation().Field("price")) - builder = builder.Aggregation("sales_per_month", h) - builder = builder.Aggregation("avg_monthly_sales", NewAvgBucketAggregation().BucketsPath("sales_per_month>sales")) - - res, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.AvgBucket("avg_monthly_sales") - if !found { - t.Fatal("expected avg_monthly_sales aggregation") - } - if agg == nil { - t.Fatal("expected avg_monthly_sales aggregation") - } - if agg.Value == nil { - t.Fatal("expected avg_monthly_sales.value != nil") - } - if got, want := *agg.Value, float64(939.2); got != want { - t.Fatalf("expected avg_monthly_sales.value=%v; got: %v", want, got) - } -} - -func TestAggsIntegrationDerivative(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). - Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("sales", NewSumAggregation().Field("price")) - h = h.SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales")) - builder = builder.Aggregation("sales_per_month", h) - - res, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.DateHistogram("sales_per_month") - if !found { - t.Fatal("expected sales_per_month aggregation") - } - if agg == nil { - t.Fatal("expected sales_per_month aggregation") - } - if got, want := len(agg.Buckets), 6; got != want { - t.Fatalf("expected %d buckets; got: %d", want, got) - } - - if got, want := agg.Buckets[0].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[1].DocCount, int64(0); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[2].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[3].DocCount, int64(3); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[4].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[5].DocCount, int64(2); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - - d, found := agg.Buckets[0].Derivative("sales_deriv") - if found { - t.Fatal("expected no sales_deriv aggregation") - } - if d != nil { - t.Fatal("expected no sales_deriv aggregation") - } - - d, found = agg.Buckets[1].Derivative("sales_deriv") - if !found { - t.Fatal("expected sales_deriv aggregation") - } - if d == nil { - t.Fatal("expected sales_deriv aggregation") - } - if d.Value != 
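In the avg_bucket test above, the ">" in "sales_per_month>sales" steps from the sibling multi-bucket aggregation into its "sales" sub-metric; that is the entire wiring of a sibling pipeline. A cluster-free sketch of how those builders serialize (names taken from the test; the exact avg_bucket shape is inferred from the sum_bucket test above, so treat it as indicative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	h := elastic.NewDateHistogramAggregation().Field("time").Interval("month").
		SubAggregation("sales", elastic.NewSumAggregation().Field("price"))
	avg := elastic.NewAvgBucketAggregation().BucketsPath("sales_per_month>sales")

	for name, agg := range map[string]elastic.Aggregation{
		"sales_per_month":   h,
		"avg_monthly_sales": avg,
	} {
		src, err := agg.Source()
		if err != nil {
			panic(err)
		}
		data, _ := json.Marshal(src)
		fmt.Printf("%s: %s\n", name, data)
	}
	// avg_monthly_sales should marshal to
	// {"avg_bucket":{"buckets_path":"sales_per_month>sales"}}
}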
nil { - t.Fatal("expected sales_deriv value == nil") - } - - d, found = agg.Buckets[2].Derivative("sales_deriv") - if !found { - t.Fatal("expected sales_deriv aggregation") - } - if d == nil { - t.Fatal("expected sales_deriv aggregation") - } - if d.Value != nil { - t.Fatal("expected sales_deriv value == nil") - } - - d, found = agg.Buckets[3].Derivative("sales_deriv") - if !found { - t.Fatal("expected sales_deriv aggregation") - } - if d == nil { - t.Fatal("expected sales_deriv aggregation") - } - if d.Value == nil { - t.Fatal("expected sales_deriv value != nil") - } - if got, want := *d.Value, float64(2348.0); got != want { - t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[4].Derivative("sales_deriv") - if !found { - t.Fatal("expected sales_deriv aggregation") - } - if d == nil { - t.Fatal("expected sales_deriv aggregation") - } - if d.Value == nil { - t.Fatal("expected sales_deriv value != nil") - } - if got, want := *d.Value, float64(-1658.0); got != want { - t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[5].Derivative("sales_deriv") - if !found { - t.Fatal("expected sales_deriv aggregation") - } - if d == nil { - t.Fatal("expected sales_deriv aggregation") - } - if d.Value == nil { - t.Fatal("expected sales_deriv value != nil") - } - if got, want := *d.Value, float64(-722.0); got != want { - t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got) - } -} - -func TestAggsIntegrationMaxBucket(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). 
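The derivative assertions above pin down the edge cases that matter when consuming a parent pipeline: the very first bucket has no derivative at all (found is false), a bucket right after an empty one can carry a nil Value, and only later buckets hold concrete numbers. A compile-only sketch of that defensive reading pattern, assuming a SearchResult produced by a query like the one above:

package main

import (
	"fmt"

	"github.com/olivere/elastic"
)

// printDerivatives walks the "sales_per_month" date histogram and prints the
// "sales_deriv" value where one exists; both names come from the test above.
func printDerivatives(res *elastic.SearchResult) {
	histo, found := res.Aggregations.DateHistogram("sales_per_month")
	if !found || histo == nil {
		return
	}
	for i, bucket := range histo.Buckets {
		d, ok := bucket.Derivative("sales_deriv")
		if !ok || d == nil || d.Value == nil {
			fmt.Printf("bucket %d: no derivative\n", i) // e.g. the first bucket
			continue
		}
		fmt.Printf("bucket %d: %v\n", i, *d.Value)
	}
}

func main() {}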
- Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("sales", NewSumAggregation().Field("price")) - builder = builder.Aggregation("sales_per_month", h) - builder = builder.Aggregation("max_monthly_sales", NewMaxBucketAggregation().BucketsPath("sales_per_month>sales")) - - res, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.MaxBucket("max_monthly_sales") - if !found { - t.Fatal("expected max_monthly_sales aggregation") - } - if agg == nil { - t.Fatal("expected max_monthly_sales aggregation") - } - if got, want := len(agg.Keys), 1; got != want { - t.Fatalf("expected len(max_monthly_sales.keys)=%d; got: %d", want, got) - } - if got, want := agg.Keys[0], "2015-04-01"; got != want { - t.Fatalf("expected max_monthly_sales.keys[0]=%v; got: %v", want, got) - } - if agg.Value == nil { - t.Fatal("expected max_monthly_sales.value != nil") - } - if got, want := *agg.Value, float64(2448); got != want { - t.Fatalf("expected max_monthly_sales.value=%v; got: %v", want, got) - } -} - -func TestAggsIntegrationMinBucket(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). 
- Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("sales", NewSumAggregation().Field("price")) - builder = builder.Aggregation("sales_per_month", h) - builder = builder.Aggregation("min_monthly_sales", NewMinBucketAggregation().BucketsPath("sales_per_month>sales")) - - res, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.MinBucket("min_monthly_sales") - if !found { - t.Fatal("expected min_monthly_sales aggregation") - } - if agg == nil { - t.Fatal("expected min_monthly_sales aggregation") - } - if got, want := len(agg.Keys), 1; got != want { - t.Fatalf("expected len(min_monthly_sales.keys)=%d; got: %d", want, got) - } - if got, want := agg.Keys[0], "2015-06-01"; got != want { - t.Fatalf("expected min_monthly_sales.keys[0]=%v; got: %v", want, got) - } - if agg.Value == nil { - t.Fatal("expected min_monthly_sales.value != nil") - } - if got, want := *agg.Value, float64(68); got != want { - t.Fatalf("expected min_monthly_sales.value=%v; got: %v", want, got) - } -} - -func TestAggsIntegrationSumBucket(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). - Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("sales", NewSumAggregation().Field("price")) - builder = builder.Aggregation("sales_per_month", h) - builder = builder.Aggregation("sum_monthly_sales", NewSumBucketAggregation().BucketsPath("sales_per_month>sales")) - - res, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.SumBucket("sum_monthly_sales") - if !found { - t.Fatal("expected sum_monthly_sales aggregation") - } - if agg == nil { - t.Fatal("expected sum_monthly_sales aggregation") - } - if agg.Value == nil { - t.Fatal("expected sum_monthly_sales.value != nil") - } - if got, want := *agg.Value, float64(4696.0); got != want { - t.Fatalf("expected sum_monthly_sales.value=%v; got: %v", want, got) - } -} - -func TestAggsIntegrationMovAvg(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). 
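Unlike avg_bucket and sum_bucket, the max_bucket and min_bucket results above carry both the extreme Value and the Keys of the bucket(s) where it occurred (more than one key on a tie). A compile-only sketch of reading one, with names from the test above:

package main

import (
	"fmt"

	"github.com/olivere/elastic"
)

// printMaxMonth reports which month(s) held the maximum monthly sales; the
// aggregation name "max_monthly_sales" matches the test above, and the
// SearchResult is assumed to come from the query built there.
func printMaxMonth(res *elastic.SearchResult) {
	agg, found := res.Aggregations.MaxBucket("max_monthly_sales")
	if !found || agg == nil || agg.Value == nil {
		return
	}
	for _, key := range agg.Keys { // e.g. "2015-04-01"
		fmt.Printf("max %v in %v\n", *agg.Value, key)
	}
}

func main() {}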
- Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("the_sum", NewSumAggregation().Field("price")) - h = h.SubAggregation("the_movavg", NewMovAvgAggregation().BucketsPath("the_sum")) - builder = builder.Aggregation("my_date_histo", h) - - res, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.DateHistogram("my_date_histo") - if !found { - t.Fatal("expected sum_monthly_sales aggregation") - } - if agg == nil { - t.Fatal("expected sum_monthly_sales aggregation") - } - if got, want := len(agg.Buckets), 6; got != want { - t.Fatalf("expected %d buckets; got: %d", want, got) - } - - d, found := agg.Buckets[0].MovAvg("the_movavg") - if found { - t.Fatal("expected no the_movavg aggregation") - } - if d != nil { - t.Fatal("expected no the_movavg aggregation") - } - - d, found = agg.Buckets[1].MovAvg("the_movavg") - if found { - t.Fatal("expected no the_movavg aggregation") - } - if d != nil { - t.Fatal("expected no the_movavg aggregation") - } - - d, found = agg.Buckets[2].MovAvg("the_movavg") - if !found { - t.Fatal("expected the_movavg aggregation") - } - if d == nil { - t.Fatal("expected the_movavg aggregation") - } - if d.Value == nil { - t.Fatal("expected the_movavg value") - } - if got, want := *d.Value, float64(1290.0); got != want { - t.Fatalf("expected %v buckets; got: %v", want, got) - } - - d, found = agg.Buckets[3].MovAvg("the_movavg") - if !found { - t.Fatal("expected the_movavg aggregation") - } - if d == nil { - t.Fatal("expected the_movavg aggregation") - } - if d.Value == nil { - t.Fatal("expected the_movavg value") - } - if got, want := *d.Value, float64(695.0); got != want { - t.Fatalf("expected %v buckets; got: %v", want, got) - } - - d, found = agg.Buckets[4].MovAvg("the_movavg") - if !found { - t.Fatal("expected the_movavg aggregation") - } - if d == nil { - t.Fatal("expected the_movavg aggregation") - } - if d.Value == nil { - t.Fatal("expected the_movavg value") - } - if got, want := *d.Value, float64(1279.3333333333333); got != want { - t.Fatalf("expected %v buckets; got: %v", want, got) - } - - d, found = agg.Buckets[5].MovAvg("the_movavg") - if !found { - t.Fatal("expected the_movavg aggregation") - } - if d == nil { - t.Fatal("expected the_movavg aggregation") - } - if d.Value == nil { - t.Fatal("expected the_movavg value") - } - if got, want := *d.Value, float64(1157.0); got != want { - t.Fatalf("expected %v buckets; got: %v", want, got) - } -} - -func TestAggsIntegrationCumulativeSum(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). 
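moving_avg, exercised above, is a parent pipeline: it sits inside the histogram and averages the sibling "the_sum" metric over the preceding non-empty buckets, which is why the first buckets report no value and why the asserted series runs 1290, 695, 1279.33…, 1157 (each the average of the earlier monthly sums with the empty month skipped). A cluster-free sketch of how that nesting serializes:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	h := elastic.NewDateHistogramAggregation().Field("time").Interval("month").
		SubAggregation("the_sum", elastic.NewSumAggregation().Field("price")).
		SubAggregation("the_movavg", elastic.NewMovAvgAggregation().BucketsPath("the_sum"))
	src, err := h.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	// One date_histogram body carrying both sub-aggregations.
	fmt.Println(string(data))
}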
- Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("sales", NewSumAggregation().Field("price")) - h = h.SubAggregation("cumulative_sales", NewCumulativeSumAggregation().BucketsPath("sales")) - builder = builder.Aggregation("sales_per_month", h) - - res, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.DateHistogram("sales_per_month") - if !found { - t.Fatal("expected sales_per_month aggregation") - } - if agg == nil { - t.Fatal("expected sales_per_month aggregation") - } - if got, want := len(agg.Buckets), 6; got != want { - t.Fatalf("expected %d buckets; got: %d", want, got) - } - - if got, want := agg.Buckets[0].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[1].DocCount, int64(0); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[2].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[3].DocCount, int64(3); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[4].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[5].DocCount, int64(2); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - - d, found := agg.Buckets[0].CumulativeSum("cumulative_sales") - if !found { - t.Fatal("expected cumulative_sales aggregation") - } - if d == nil { - t.Fatal("expected cumulative_sales aggregation") - } - if d.Value == nil { - t.Fatal("expected cumulative_sales value != nil") - } - if got, want := *d.Value, float64(1290.0); got != want { - t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[1].CumulativeSum("cumulative_sales") - if !found { - t.Fatal("expected cumulative_sales aggregation") - } - if d == nil { - t.Fatal("expected cumulative_sales aggregation") - } - if d.Value == nil { - t.Fatal("expected cumulative_sales value != nil") - } - if got, want := *d.Value, float64(1290.0); got != want { - t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[2].CumulativeSum("cumulative_sales") - if !found { - t.Fatal("expected cumulative_sales aggregation") - } - if d == nil { - t.Fatal("expected cumulative_sales aggregation") - } - if d.Value == nil { - t.Fatal("expected cumulative_sales value != nil") - } - if got, want := *d.Value, float64(1390.0); got != want { - t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[3].CumulativeSum("cumulative_sales") - if !found { - t.Fatal("expected cumulative_sales aggregation") - } - if d == nil { - t.Fatal("expected cumulative_sales aggregation") - } - if d.Value == nil { - t.Fatal("expected cumulative_sales value != nil") - } - if got, want := *d.Value, float64(3838.0); got != want { - t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[4].CumulativeSum("cumulative_sales") - if !found { - t.Fatal("expected cumulative_sales aggregation") - } - if d == nil { - t.Fatal("expected cumulative_sales aggregation") - } - if d.Value == nil { - t.Fatal("expected cumulative_sales value != nil") - } - if 
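cumulative_sum, unlike the derivative above, yields a value for every bucket, empty ones included, since each bucket is just the running total so far. The numbers asserted in this test are consistent across the whole file: monthly sales recovered from the running totals are 1290, 0, 100, 2448, 790 and 68, whose total (4696), maximum (2448), minimum over non-empty months (68) and average over the five non-empty months (939.2) are exactly the values asserted by the sum_bucket, max_bucket, min_bucket and avg_bucket tests. The recovery itself is one subtraction per bucket:

package main

import "fmt"

func main() {
	// Running totals asserted in the cumulative_sum test.
	cum := []float64{1290, 1290, 1390, 3838, 4628, 4696}
	prev := 0.0
	for i, c := range cum {
		fmt.Printf("month %d sales: %v\n", i, c-prev) // 1290, 0, 100, 2448, 790, 68
		prev = c
	}
}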
got, want := *d.Value, float64(4628.0); got != want { - t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[5].CumulativeSum("cumulative_sales") - if !found { - t.Fatal("expected cumulative_sales aggregation") - } - if d == nil { - t.Fatal("expected cumulative_sales aggregation") - } - if d.Value == nil { - t.Fatal("expected cumulative_sales value != nil") - } - if got, want := *d.Value, float64(4696.0); got != want { - t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got) - } -} - -func TestAggsIntegrationBucketScript(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). - Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("total_sales", NewSumAggregation().Field("price")) - appleFilter := NewFilterAggregation().Filter(NewTermQuery("manufacturer", "Apple")) - appleFilter = appleFilter.SubAggregation("sales", NewSumAggregation().Field("price")) - h = h.SubAggregation("apple_sales", appleFilter) - h = h.SubAggregation("apple_percentage", - NewBucketScriptAggregation(). - GapPolicy("insert_zeros"). - AddBucketsPath("appleSales", "apple_sales>sales"). - AddBucketsPath("totalSales", "total_sales"). - Script(NewScript("appleSales / totalSales * 100"))) - builder = builder.Aggregation("sales_per_month", h) - - res, err := builder.Do() - if err != nil { - t.Fatalf("%v (maybe scripting is disabled?)", err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.DateHistogram("sales_per_month") - if !found { - t.Fatal("expected sales_per_month aggregation") - } - if agg == nil { - t.Fatal("expected sales_per_month aggregation") - } - if got, want := len(agg.Buckets), 6; got != want { - t.Fatalf("expected %d buckets; got: %d", want, got) - } - - if got, want := agg.Buckets[0].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[1].DocCount, int64(0); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[2].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[3].DocCount, int64(3); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[4].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[5].DocCount, int64(2); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - - d, found := agg.Buckets[0].BucketScript("apple_percentage") - if !found { - t.Fatal("expected apple_percentage aggregation") - } - if d == nil { - t.Fatal("expected apple_percentage aggregation") - } - if d.Value == nil { - t.Fatal("expected apple_percentage value != nil") - } - if got, want := *d.Value, float64(100.0); got != want { - t.Fatalf("expected apple_percentage.value=%v; 
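bucket_script, used above, binds several buckets paths to script variables: AddBucketsPath maps "appleSales" and "totalSales" for the inline expression, and gap_policy insert_zeros substitutes zeros for the empty month so it evaluates (0/0, yielding the nil Value checked above) rather than being dropped. A serialization-only sketch; the exact encoding of the script value can vary between client versions, so the expected output is only indicative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	agg := elastic.NewBucketScriptAggregation().
		GapPolicy("insert_zeros").
		AddBucketsPath("appleSales", "apple_sales>sales").
		AddBucketsPath("totalSales", "total_sales").
		Script(elastic.NewScript("appleSales / totalSales * 100"))
	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// Roughly:
	// {"bucket_script":{"buckets_path":{"appleSales":"apple_sales>sales",
	//  "totalSales":"total_sales"},"gap_policy":"insert_zeros",
	//  "script":"appleSales / totalSales * 100"}}
}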
got: %v", want, got) - } - - d, found = agg.Buckets[1].BucketScript("apple_percentage") - if !found { - t.Fatal("expected apple_percentage aggregation") - } - if d == nil { - t.Fatal("expected apple_percentage aggregation") - } - if d.Value != nil { - t.Fatal("expected apple_percentage value == nil") - } - - d, found = agg.Buckets[2].BucketScript("apple_percentage") - if !found { - t.Fatal("expected apple_percentage aggregation") - } - if d == nil { - t.Fatal("expected apple_percentage aggregation") - } - if d.Value == nil { - t.Fatal("expected apple_percentage value != nil") - } - if got, want := *d.Value, float64(0.0); got != want { - t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[3].BucketScript("apple_percentage") - if !found { - t.Fatal("expected apple_percentage aggregation") - } - if d == nil { - t.Fatal("expected apple_percentage aggregation") - } - if d.Value == nil { - t.Fatal("expected apple_percentage value != nil") - } - if got, want := *d.Value, float64(34.64052287581699); got != want { - t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[4].BucketScript("apple_percentage") - if !found { - t.Fatal("expected apple_percentage aggregation") - } - if d == nil { - t.Fatal("expected apple_percentage aggregation") - } - if d.Value == nil { - t.Fatal("expected apple_percentage value != nil") - } - if got, want := *d.Value, float64(0.0); got != want { - t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[5].BucketScript("apple_percentage") - if !found { - t.Fatal("expected apple_percentage aggregation") - } - if d == nil { - t.Fatal("expected apple_percentage aggregation") - } - if d.Value == nil { - t.Fatal("expected apple_percentage value != nil") - } - if got, want := *d.Value, float64(0.0); got != want { - t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got) - } -} - -func TestAggsIntegrationBucketSelector(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). - Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("total_sales", NewSumAggregation().Field("price")) - h = h.SubAggregation("sales_bucket_filter", - NewBucketSelectorAggregation(). - AddBucketsPath("totalSales", "total_sales"). 
- Script(NewScript("totalSales <= 100"))) - builder = builder.Aggregation("sales_per_month", h) - - res, err := builder.Do() - if err != nil { - t.Fatalf("%v (maybe scripting is disabled?)", err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.DateHistogram("sales_per_month") - if !found { - t.Fatal("expected sales_per_month aggregation") - } - if agg == nil { - t.Fatal("expected sales_per_month aggregation") - } - if got, want := len(agg.Buckets), 2; got != want { - t.Fatalf("expected %d buckets; got: %d", want, got) - } - - if got, want := agg.Buckets[0].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[1].DocCount, int64(2); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } -} - -func TestAggsIntegrationSerialDiff(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - if esversion < "2.0" { - t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion) - return - } - - // Match all should return all documents - builder := client.Search(). - Index(testIndexName). - Type("order"). - Query(NewMatchAllQuery()). - Pretty(true) - h := NewDateHistogramAggregation().Field("time").Interval("month") - h = h.SubAggregation("sales", NewSumAggregation().Field("price")) - h = h.SubAggregation("the_diff", NewSerialDiffAggregation().BucketsPath("sales").Lag(1)) - builder = builder.Aggregation("sales_per_month", h) - - res, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - - aggs := res.Aggregations - if aggs == nil { - t.Fatal("expected aggregations != nil; got: nil") - } - - agg, found := aggs.DateHistogram("sales_per_month") - if !found { - t.Fatal("expected sales_per_month aggregation") - } - if agg == nil { - t.Fatal("expected sales_per_month aggregation") - } - if got, want := len(agg.Buckets), 6; got != want { - t.Fatalf("expected %d buckets; got: %d", want, got) - } - - if got, want := agg.Buckets[0].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[1].DocCount, int64(0); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[2].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[3].DocCount, int64(3); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[4].DocCount, int64(1); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - if got, want := agg.Buckets[5].DocCount, int64(2); got != want { - t.Fatalf("expected DocCount=%d; got: %d", want, got) - } - - d, found := agg.Buckets[0].SerialDiff("the_diff") - if found { - t.Fatal("expected no the_diff aggregation") - } - if d != nil { - t.Fatal("expected no the_diff aggregation") - } - - d, found = agg.Buckets[1].SerialDiff("the_diff") - if found { - t.Fatal("expected no the_diff aggregation") - } - if d != nil { - t.Fatal("expected no the_diff aggregation") - } - - d, found = agg.Buckets[2].SerialDiff("the_diff") - if found { 
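bucket_selector is the one pipeline here that reshapes the response: buckets whose script evaluates to false are removed, so the histogram that held six buckets in every other test comes back with just two. Consistent with the monthly figures recovered earlier, those are the months with sales of 100 (one document) and 68 (two documents); the empty month, although 0 <= 100, is apparently dropped under the default gap policy. A serialization sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	agg := elastic.NewBucketSelectorAggregation().
		AddBucketsPath("totalSales", "total_sales").
		Script(elastic.NewScript("totalSales <= 100"))
	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// Roughly: {"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},
	//  "script":"totalSales <= 100"}}
}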
- t.Fatal("expected no the_diff aggregation") - } - if d != nil { - t.Fatal("expected no the_diff aggregation") - } - - d, found = agg.Buckets[3].SerialDiff("the_diff") - if !found { - t.Fatal("expected the_diff aggregation") - } - if d == nil { - t.Fatal("expected the_diff aggregation") - } - if d.Value == nil { - t.Fatal("expected the_diff value != nil") - } - if got, want := *d.Value, float64(2348.0); got != want { - t.Fatalf("expected the_diff.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[4].SerialDiff("the_diff") - if !found { - t.Fatal("expected the_diff aggregation") - } - if d == nil { - t.Fatal("expected the_diff aggregation") - } - if d.Value == nil { - t.Fatal("expected the_diff value != nil") - } - if got, want := *d.Value, float64(-1658.0); got != want { - t.Fatalf("expected the_diff.value=%v; got: %v", want, got) - } - - d, found = agg.Buckets[5].SerialDiff("the_diff") - if !found { - t.Fatal("expected the_diff aggregation") - } - if d == nil { - t.Fatal("expected the_diff aggregation") - } - if d.Value == nil { - t.Fatal("expected the_diff value != nil") - } - if got, want := *d.Value, float64(-722.0); got != want { - t.Fatalf("expected the_diff.value=%v; got: %v", want, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_test.go deleted file mode 100644 index ef6ec2112..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_aggs_test.go +++ /dev/null @@ -1,2996 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "strings" - "testing" - "time" -) - -func TestAggs(t *testing.T) { - //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndex(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - - tweet1 := tweet{ - User: "olivere", - Retweets: 108, - Message: "Welcome to Golang and Elasticsearch.", - Image: "http://golang.org/doc/gopher/gophercolor.png", - Tags: []string{"golang", "elasticsearch"}, - Location: "48.1333,11.5667", // lat,lon - Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), - } - tweet2 := tweet{ - User: "olivere", - Retweets: 0, - Message: "Another unrelated topic.", - Tags: []string{"golang"}, - Location: "48.1189,11.4289", // lat,lon - Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), - } - tweet3 := tweet{ - User: "sandrae", - Retweets: 12, - Message: "Cycling is fun.", - Tags: []string{"sports", "cycling"}, - Location: "47.7167,11.7167", // lat,lon - Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), - } - - // Add all documents - _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - all := NewMatchAllQuery() - - // Terms Aggregate by user name - globalAgg := NewGlobalAggregation() - usersAgg := 
NewTermsAggregation().Field("user").Size(10).OrderByCountDesc() - retweetsAgg := NewTermsAggregation().Field("retweets").Size(10) - avgRetweetsAgg := NewAvgAggregation().Field("retweets") - avgRetweetsWithMetaAgg := NewAvgAggregation().Field("retweetsMeta").Meta(map[string]interface{}{"meta": true}) - minRetweetsAgg := NewMinAggregation().Field("retweets") - maxRetweetsAgg := NewMaxAggregation().Field("retweets") - sumRetweetsAgg := NewSumAggregation().Field("retweets") - statsRetweetsAgg := NewStatsAggregation().Field("retweets") - extstatsRetweetsAgg := NewExtendedStatsAggregation().Field("retweets") - valueCountRetweetsAgg := NewValueCountAggregation().Field("retweets") - percentilesRetweetsAgg := NewPercentilesAggregation().Field("retweets") - percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75) - cardinalityAgg := NewCardinalityAggregation().Field("user") - significantTermsAgg := NewSignificantTermsAggregation().Field("message") - samplerAgg := NewSamplerAggregation().Field("user").SubAggregation("tagged_with", NewTermsAggregation().Field("tags")) - retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100) - retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100) - dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01") - missingTagsAgg := NewMissingAggregation().Field("tags") - retweetsHistoAgg := NewHistogramAggregation().Field("retweets").Interval(100) - dateHistoAgg := NewDateHistogramAggregation().Field("created").Interval("year") - retweetsFilterAgg := NewFilterAggregation().Filter( - NewRangeQuery("created").Gte("2012-01-01").Lte("2012-12-31")). 
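The Lt/Between/Gt calls on the range aggregations above each append one entry to a single ranges array; Keyed(true) only changes how the response buckets are keyed, not the request. A cluster-free serialization sketch (expected shape inferred from the builder, so treat it as indicative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	agg := elastic.NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100)
	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// Roughly:
	// {"range":{"field":"retweets","ranges":[{"to":10},{"from":10,"to":100},{"from":100}]}}
}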
- SubAggregation("avgRetweetsSub", NewAvgAggregation().Field("retweets")) - queryFilterAgg := NewFilterAggregation().Filter(NewTermQuery("tags", "golang")) - topTagsHitsAgg := NewTopHitsAggregation().Sort("created", false).Size(5).FetchSource(true) - topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg) - geoBoundsAgg := NewGeoBoundsAggregation().Field("location") - - // Run query - builder := client.Search().Index(testIndexName).Query(all).Pretty(true) - builder = builder.Aggregation("global", globalAgg) - builder = builder.Aggregation("users", usersAgg) - builder = builder.Aggregation("retweets", retweetsAgg) - builder = builder.Aggregation("avgRetweets", avgRetweetsAgg) - if esversion >= "2.0" { - builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg) - } - builder = builder.Aggregation("minRetweets", minRetweetsAgg) - builder = builder.Aggregation("maxRetweets", maxRetweetsAgg) - builder = builder.Aggregation("sumRetweets", sumRetweetsAgg) - builder = builder.Aggregation("statsRetweets", statsRetweetsAgg) - builder = builder.Aggregation("extstatsRetweets", extstatsRetweetsAgg) - builder = builder.Aggregation("valueCountRetweets", valueCountRetweetsAgg) - builder = builder.Aggregation("percentilesRetweets", percentilesRetweetsAgg) - builder = builder.Aggregation("percentileRanksRetweets", percentileRanksRetweetsAgg) - builder = builder.Aggregation("usersCardinality", cardinalityAgg) - builder = builder.Aggregation("significantTerms", significantTermsAgg) - builder = builder.Aggregation("sample", samplerAgg) - builder = builder.Aggregation("retweetsRange", retweetsRangeAgg) - builder = builder.Aggregation("retweetsKeyedRange", retweetsKeyedRangeAgg) - builder = builder.Aggregation("dateRange", dateRangeAgg) - builder = builder.Aggregation("missingTags", missingTagsAgg) - builder = builder.Aggregation("retweetsHisto", retweetsHistoAgg) - builder = builder.Aggregation("dateHisto", dateHistoAgg) - builder = builder.Aggregation("retweetsFilter", retweetsFilterAgg) - builder = builder.Aggregation("queryFilter", queryFilterAgg) - builder = builder.Aggregation("top-tags", topTagsAgg) - builder = builder.Aggregation("viewport", geoBoundsAgg) - if esversion >= "1.4" { - countByUserAgg := NewFiltersAggregation().Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae")) - builder = builder.Aggregation("countByUser", countByUserAgg) - } - if esversion >= "2.0" { - // AvgBucket - dateHisto := NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("avgBucketDateHisto", dateHisto) - builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets")) - // MinBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("minBucketDateHisto", dateHisto) - builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets")) - // MaxBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("maxBucketDateHisto", dateHisto) - builder = 
builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets")) - // SumBucket - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - builder = builder.Aggregation("sumBucketDateHisto", dateHisto) - builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets")) - // MovAvg - dateHisto = NewDateHistogramAggregation().Field("created").Interval("year") - dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets")) - dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets")) - builder = builder.Aggregation("movingAvgDateHisto", dateHisto) - } - searchResult, err := builder.Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected Hits != nil; got: nil") - } - if searchResult.Hits.TotalHits != 3 { - t.Errorf("expected Hits.TotalHits = %d; got: %d", 3, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 3 { - t.Errorf("expected len(Hits.Hits) = %d; got: %d", 3, len(searchResult.Hits.Hits)) - } - agg := searchResult.Aggregations - if agg == nil { - t.Fatalf("expected Aggregations != nil; got: nil") - } - - // Search for non-existent aggregate should return (nil, false) - unknownAgg, found := agg.Terms("no-such-aggregate") - if found { - t.Errorf("expected unknown aggregation to not be found; got: %v", found) - } - if unknownAgg != nil { - t.Errorf("expected unknown aggregation to return %v; got %v", nil, unknownAgg) - } - - // Global - globalAggRes, found := agg.Global("global") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if globalAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if globalAggRes.DocCount != 3 { - t.Errorf("expected DocCount = %d; got: %d", 3, globalAggRes.DocCount) - } - - // Search for existent aggregate (by name) should return (aggregate, true) - termsAggRes, found := agg.Terms("users") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if termsAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(termsAggRes.Buckets) != 2 { - t.Fatalf("expected %d; got: %d", 2, len(termsAggRes.Buckets)) - } - if termsAggRes.Buckets[0].Key != "olivere" { - t.Errorf("expected %q; got: %q", "olivere", termsAggRes.Buckets[0].Key) - } - if termsAggRes.Buckets[0].DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, termsAggRes.Buckets[0].DocCount) - } - if termsAggRes.Buckets[1].Key != "sandrae" { - t.Errorf("expected %q; got: %q", "sandrae", termsAggRes.Buckets[1].Key) - } - if termsAggRes.Buckets[1].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, termsAggRes.Buckets[1].DocCount) - } - - // A terms aggregate with keys that are not strings - retweetsAggRes, found := agg.Terms("retweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if retweetsAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(retweetsAggRes.Buckets) != 3 { - t.Fatalf("expected %d; got: %d", 3, len(retweetsAggRes.Buckets)) - } - - if retweetsAggRes.Buckets[0].Key != float64(0) { - t.Errorf("expected %v; got: %v", float64(0), retweetsAggRes.Buckets[0].Key) - } - if got, err := retweetsAggRes.Buckets[0].KeyNumber.Int64(); err != nil { - t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[0].Key) - } else if got != 0 { - t.Errorf("expected %d; got: 
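Every typed accessor on Aggregations follows the (value, found) contract exercised above: an unknown name yields (nil, false) rather than a panic, so lookups can be chained safely. A compile-only sketch around the "users" terms aggregation from this test:

package main

import (
	"fmt"

	"github.com/olivere/elastic"
)

// userCounts extracts per-user document counts from the "users" terms
// aggregation built above; the surrounding search is assumed.
func userCounts(res *elastic.SearchResult) map[string]int64 {
	out := make(map[string]int64)
	terms, found := res.Aggregations.Terms("users")
	if !found || terms == nil {
		return out // absent or misnamed aggregation: empty result, no panic
	}
	for _, b := range terms.Buckets {
		out[fmt.Sprintf("%v", b.Key)] = b.DocCount
	}
	return out
}

func main() {}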
%d", 0, got) - } - if retweetsAggRes.Buckets[0].KeyNumber != "0" { - t.Errorf("expected %q; got: %q", "0", retweetsAggRes.Buckets[0].KeyNumber) - } - if retweetsAggRes.Buckets[0].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[0].DocCount) - } - - if retweetsAggRes.Buckets[1].Key != float64(12) { - t.Errorf("expected %v; got: %v", float64(12), retweetsAggRes.Buckets[1].Key) - } - if got, err := retweetsAggRes.Buckets[1].KeyNumber.Int64(); err != nil { - t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[1].KeyNumber) - } else if got != 12 { - t.Errorf("expected %d; got: %d", 12, got) - } - if retweetsAggRes.Buckets[1].KeyNumber != "12" { - t.Errorf("expected %q; got: %q", "12", retweetsAggRes.Buckets[1].KeyNumber) - } - if retweetsAggRes.Buckets[1].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[1].DocCount) - } - - if retweetsAggRes.Buckets[2].Key != float64(108) { - t.Errorf("expected %v; got: %v", float64(108), retweetsAggRes.Buckets[2].Key) - } - if got, err := retweetsAggRes.Buckets[2].KeyNumber.Int64(); err != nil { - t.Errorf("expected %d; got: %v", 108, retweetsAggRes.Buckets[2].KeyNumber) - } else if got != 108 { - t.Errorf("expected %d; got: %d", 108, got) - } - if retweetsAggRes.Buckets[2].KeyNumber != "108" { - t.Errorf("expected %q; got: %q", "108", retweetsAggRes.Buckets[2].KeyNumber) - } - if retweetsAggRes.Buckets[2].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[2].DocCount) - } - - // avgRetweets - avgAggRes, found := agg.Avg("avgRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if avgAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if avgAggRes.Value == nil { - t.Fatalf("expected != nil; got: %v", *avgAggRes.Value) - } - if *avgAggRes.Value != 40.0 { - t.Errorf("expected %v; got: %v", 40.0, *avgAggRes.Value) - } - - // avgRetweetsWithMeta - if esversion >= "2.0" { - avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if avgMetaAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if avgMetaAggRes.Meta == nil { - t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta) - } - metaDataValue, found := avgMetaAggRes.Meta["meta"] - if !found { - t.Fatalf("expected to return meta data key %q; got: %v", "meta", found) - } - if flag, ok := metaDataValue.(bool); !ok { - t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue) - } else if flag != true { - t.Fatalf("expected to return meta data key value %v; got: %v", true, flag) - } - } - - // minRetweets - minAggRes, found := agg.Min("minRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if minAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if minAggRes.Value == nil { - t.Fatalf("expected != nil; got: %v", *minAggRes.Value) - } - if *minAggRes.Value != 0.0 { - t.Errorf("expected %v; got: %v", 0.0, *minAggRes.Value) - } - - // maxRetweets - maxAggRes, found := agg.Max("maxRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if maxAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if maxAggRes.Value == nil { - t.Fatalf("expected != nil; got: %v", *maxAggRes.Value) - } - if *maxAggRes.Value != 108.0 { - t.Errorf("expected %v; got: %v", 108.0, *maxAggRes.Value) - } - - // sumRetweets - sumAggRes, found := agg.Sum("sumRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if sumAggRes == nil { - 
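For the numeric terms keys above, each bucket exposes both Key (decoded like any JSON number, as float64) and KeyNumber (a json.Number that preserves the exact text), so lossless integer comparisons go through KeyNumber.Int64(). A standalone illustration of the distinction:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Decoding into interface{} turns every JSON number into float64...
	var v interface{}
	json.Unmarshal([]byte(`108`), &v)
	fmt.Printf("%T %v\n", v, v) // float64 108

	// ...while json.Number keeps the original text and converts on demand.
	var n json.Number
	json.Unmarshal([]byte(`108`), &n)
	i, _ := n.Int64()
	fmt.Println(n, i) // 108 108
}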
t.Fatalf("expected != nil; got: nil") - } - if sumAggRes.Value == nil { - t.Fatalf("expected != nil; got: %v", *sumAggRes.Value) - } - if *sumAggRes.Value != 120.0 { - t.Errorf("expected %v; got: %v", 120.0, *sumAggRes.Value) - } - - // statsRetweets - statsAggRes, found := agg.Stats("statsRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if statsAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if statsAggRes.Count != 3 { - t.Errorf("expected %d; got: %d", 3, statsAggRes.Count) - } - if statsAggRes.Min == nil { - t.Fatalf("expected != nil; got: %v", *statsAggRes.Min) - } - if *statsAggRes.Min != 0.0 { - t.Errorf("expected %v; got: %v", 0.0, *statsAggRes.Min) - } - if statsAggRes.Max == nil { - t.Fatalf("expected != nil; got: %v", *statsAggRes.Max) - } - if *statsAggRes.Max != 108.0 { - t.Errorf("expected %v; got: %v", 108.0, *statsAggRes.Max) - } - if statsAggRes.Avg == nil { - t.Fatalf("expected != nil; got: %v", *statsAggRes.Avg) - } - if *statsAggRes.Avg != 40.0 { - t.Errorf("expected %v; got: %v", 40.0, *statsAggRes.Avg) - } - if statsAggRes.Sum == nil { - t.Fatalf("expected != nil; got: %v", *statsAggRes.Sum) - } - if *statsAggRes.Sum != 120.0 { - t.Errorf("expected %v; got: %v", 120.0, *statsAggRes.Sum) - } - - // extstatsRetweets - extStatsAggRes, found := agg.ExtendedStats("extstatsRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if extStatsAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if extStatsAggRes.Count != 3 { - t.Errorf("expected %d; got: %d", 3, extStatsAggRes.Count) - } - if extStatsAggRes.Min == nil { - t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Min) - } - if *extStatsAggRes.Min != 0.0 { - t.Errorf("expected %v; got: %v", 0.0, *extStatsAggRes.Min) - } - if extStatsAggRes.Max == nil { - t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Max) - } - if *extStatsAggRes.Max != 108.0 { - t.Errorf("expected %v; got: %v", 108.0, *extStatsAggRes.Max) - } - if extStatsAggRes.Avg == nil { - t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Avg) - } - if *extStatsAggRes.Avg != 40.0 { - t.Errorf("expected %v; got: %v", 40.0, *extStatsAggRes.Avg) - } - if extStatsAggRes.Sum == nil { - t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Sum) - } - if *extStatsAggRes.Sum != 120.0 { - t.Errorf("expected %v; got: %v", 120.0, *extStatsAggRes.Sum) - } - if extStatsAggRes.SumOfSquares == nil { - t.Fatalf("expected != nil; got: %v", *extStatsAggRes.SumOfSquares) - } - if *extStatsAggRes.SumOfSquares != 11808.0 { - t.Errorf("expected %v; got: %v", 11808.0, *extStatsAggRes.SumOfSquares) - } - if extStatsAggRes.Variance == nil { - t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Variance) - } - if *extStatsAggRes.Variance != 2336.0 { - t.Errorf("expected %v; got: %v", 2336.0, *extStatsAggRes.Variance) - } - if extStatsAggRes.StdDeviation == nil { - t.Fatalf("expected != nil; got: %v", *extStatsAggRes.StdDeviation) - } - if *extStatsAggRes.StdDeviation != 48.33218389437829 { - t.Errorf("expected %v; got: %v", 48.33218389437829, *extStatsAggRes.StdDeviation) - } - - // valueCountRetweets - valueCountAggRes, found := agg.ValueCount("valueCountRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if valueCountAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if valueCountAggRes.Value == nil { - t.Fatalf("expected != nil; got: %v", *valueCountAggRes.Value) - } - if *valueCountAggRes.Value != 3.0 { - t.Errorf("expected %v; got: %v", 3.0, 
*valueCountAggRes.Value) - } - - // percentilesRetweets - percentilesAggRes, found := agg.Percentiles("percentilesRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if percentilesAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - // ES 1.4.x returns 7: {"1.0":...} - // ES 1.5.0 returns 14: {"1.0":..., "1.0_as_string":...} - // So we're relaxing the test here. - if len(percentilesAggRes.Values) == 0 { - t.Errorf("expected at least %d value; got: %d\nValues are: %#v", 1, len(percentilesAggRes.Values), percentilesAggRes.Values) - } - if _, found := percentilesAggRes.Values["0.0"]; found { - t.Errorf("expected %v; got: %v", false, found) - } - if percentilesAggRes.Values["1.0"] != 0.24 { - t.Errorf("expected %v; got: %v", 0.24, percentilesAggRes.Values["1.0"]) - } - if percentilesAggRes.Values["25.0"] != 6.0 { - t.Errorf("expected %v; got: %v", 6.0, percentilesAggRes.Values["25.0"]) - } - if percentilesAggRes.Values["99.0"] != 106.08 { - t.Errorf("expected %v; got: %v", 106.08, percentilesAggRes.Values["99.0"]) - } - - // percentileRanksRetweets - percentileRanksAggRes, found := agg.PercentileRanks("percentileRanksRetweets") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if percentileRanksAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(percentileRanksAggRes.Values) == 0 { - t.Errorf("expected at least %d value; got %d\nValues are: %#v", 1, len(percentileRanksAggRes.Values), percentileRanksAggRes.Values) - } - if _, found := percentileRanksAggRes.Values["0.0"]; found { - t.Errorf("expected %v; got: %v", true, found) - } - if percentileRanksAggRes.Values["25.0"] != 21.180555555555557 { - t.Errorf("expected %v; got: %v", 21.180555555555557, percentileRanksAggRes.Values["25.0"]) - } - if percentileRanksAggRes.Values["50.0"] != 29.86111111111111 { - t.Errorf("expected %v; got: %v", 29.86111111111111, percentileRanksAggRes.Values["50.0"]) - } - if percentileRanksAggRes.Values["75.0"] != 38.54166666666667 { - t.Errorf("expected %v; got: %v", 38.54166666666667, percentileRanksAggRes.Values["75.0"]) - } - - // usersCardinality - cardAggRes, found := agg.Cardinality("usersCardinality") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if cardAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if cardAggRes.Value == nil { - t.Fatalf("expected != nil; got: %v", *cardAggRes.Value) - } - if *cardAggRes.Value != 2 { - t.Errorf("expected %v; got: %v", 2, *cardAggRes.Value) - } - - // retweetsFilter - filterAggRes, found := agg.Filter("retweetsFilter") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if filterAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if filterAggRes.DocCount != 2 { - t.Fatalf("expected %v; got: %v", 2, filterAggRes.DocCount) - } - - // Retrieve sub-aggregation - avgRetweetsAggRes, found := filterAggRes.Avg("avgRetweetsSub") - if !found { - t.Error("expected sub-aggregation \"avgRetweets\" to be found; got false") - } - if avgRetweetsAggRes == nil { - t.Fatal("expected sub-aggregation \"avgRetweets\"; got nil") - } - if avgRetweetsAggRes.Value == nil { - t.Fatalf("expected != nil; got: %v", avgRetweetsAggRes.Value) - } - if *avgRetweetsAggRes.Value != 54.0 { - t.Errorf("expected %v; got: %v", 54.0, *avgRetweetsAggRes.Value) - } - - // queryFilter - queryFilterAggRes, found := agg.Filter("queryFilter") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if queryFilterAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if 
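Because single-bucket results such as the filter aggregation above embed Aggregations, sub-metrics come out through the same typed accessors, which is what the avgRetweetsSub lookup does. A compile-only sketch, names as in the test:

package main

import (
	"fmt"

	"github.com/olivere/elastic"
)

// avgRetweets2012 reads the sub-average computed inside the "retweetsFilter"
// filter aggregation above; the SearchResult is assumed to come from that query.
func avgRetweets2012(res *elastic.SearchResult) (float64, bool) {
	filter, found := res.Aggregations.Filter("retweetsFilter")
	if !found || filter == nil {
		return 0, false
	}
	avg, found := filter.Avg("avgRetweetsSub")
	if !found || avg == nil || avg.Value == nil {
		return 0, false
	}
	return *avg.Value, true
}

func main() { fmt.Println("see avgRetweets2012") }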
queryFilterAggRes.DocCount != 2 { - t.Fatalf("expected %v; got: %v", 2, queryFilterAggRes.DocCount) - } - - // significantTerms - stAggRes, found := agg.SignificantTerms("significantTerms") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if stAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if stAggRes.DocCount != 3 { - t.Errorf("expected %v; got: %v", 3, stAggRes.DocCount) - } - if len(stAggRes.Buckets) != 0 { - t.Errorf("expected %v; got: %v", 0, len(stAggRes.Buckets)) - } - - // sampler - samplerAggRes, found := agg.Sampler("sample") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if samplerAggRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if samplerAggRes.DocCount != 2 { - t.Errorf("expected %v; got: %v", 2, samplerAggRes.DocCount) - } - sub, found := samplerAggRes.Aggregations["tagged_with"] - if !found { - t.Fatalf("expected sub aggregation %q", "tagged_with") - } - if sub == nil { - t.Fatalf("expected sub aggregation %q; got: %v", "tagged_with", sub) - } - - // retweetsRange - rangeAggRes, found := agg.Range("retweetsRange") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if rangeAggRes == nil { - t.Fatal("expected != nil; got: nil") - } - if len(rangeAggRes.Buckets) != 3 { - t.Fatalf("expected %d; got: %d", 3, len(rangeAggRes.Buckets)) - } - if rangeAggRes.Buckets[0].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[0].DocCount) - } - if rangeAggRes.Buckets[1].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[1].DocCount) - } - if rangeAggRes.Buckets[2].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[2].DocCount) - } - - // retweetsKeyedRange - keyedRangeAggRes, found := agg.KeyedRange("retweetsKeyedRange") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if keyedRangeAggRes == nil { - t.Fatal("expected != nil; got: nil") - } - if len(keyedRangeAggRes.Buckets) != 3 { - t.Fatalf("expected %d; got: %d", 3, len(keyedRangeAggRes.Buckets)) - } - _, found = keyedRangeAggRes.Buckets["no-such-key"] - if found { - t.Fatalf("expected bucket to not be found; got: %v", found) - } - bucket, found := keyedRangeAggRes.Buckets["*-10.0"] - if !found { - t.Fatalf("expected bucket to be found; got: %v", found) - } - if bucket.DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, bucket.DocCount) - } - bucket, found = keyedRangeAggRes.Buckets["10.0-100.0"] - if !found { - t.Fatalf("expected bucket to be found; got: %v", found) - } - if bucket.DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, bucket.DocCount) - } - bucket, found = keyedRangeAggRes.Buckets["100.0-*"] - if !found { - t.Fatalf("expected bucket to be found; got: %v", found) - } - if bucket.DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, bucket.DocCount) - } - - // dateRange - dateRangeRes, found := agg.DateRange("dateRange") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if dateRangeRes == nil { - t.Fatal("expected != nil; got: nil") - } - if dateRangeRes.Buckets[0].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, dateRangeRes.Buckets[0].DocCount) - } - if dateRangeRes.Buckets[0].From != nil { - t.Fatal("expected From to be nil") - } - if dateRangeRes.Buckets[0].To == nil { - t.Fatal("expected To to be != nil") - } - if *dateRangeRes.Buckets[0].To != 1.325376e+12 { - t.Errorf("expected %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[0].To) - } - if dateRangeRes.Buckets[0].ToAsString != "2012-01-01T00:00:00.000Z" { - 
t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[0].ToAsString) - } - if dateRangeRes.Buckets[1].DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, dateRangeRes.Buckets[1].DocCount) - } - if dateRangeRes.Buckets[1].From == nil { - t.Fatal("expected From to be != nil") - } - if *dateRangeRes.Buckets[1].From != 1.325376e+12 { - t.Errorf("expected From = %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[1].From) - } - if dateRangeRes.Buckets[1].FromAsString != "2012-01-01T00:00:00.000Z" { - t.Errorf("expected FromAsString = %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].FromAsString) - } - if dateRangeRes.Buckets[1].To == nil { - t.Fatal("expected To to be != nil") - } - if *dateRangeRes.Buckets[1].To != 1.3569984e+12 { - t.Errorf("expected To = %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[1].To) - } - if dateRangeRes.Buckets[1].ToAsString != "2013-01-01T00:00:00.000Z" { - t.Errorf("expected ToAsString = %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].ToAsString) - } - if dateRangeRes.Buckets[2].DocCount != 0 { - t.Errorf("expected %d; got: %d", 0, dateRangeRes.Buckets[2].DocCount) - } - if dateRangeRes.Buckets[2].To != nil { - t.Fatal("expected To to be nil") - } - if dateRangeRes.Buckets[2].From == nil { - t.Fatal("expected From to be != nil") - } - if *dateRangeRes.Buckets[2].From != 1.3569984e+12 { - t.Errorf("expected %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[2].From) - } - if dateRangeRes.Buckets[2].FromAsString != "2013-01-01T00:00:00.000Z" { - t.Errorf("expected %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[2].FromAsString) - } - - // missingTags - missingRes, found := agg.Missing("missingTags") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if missingRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if missingRes.DocCount != 0 { - t.Errorf("expected searchResult.Aggregations[\"missingTags\"].DocCount = %v; got %v", 0, missingRes.DocCount) - } - - // retweetsHisto - histoRes, found := agg.Histogram("retweetsHisto") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if histoRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(histoRes.Buckets) != 2 { - t.Fatalf("expected %d; got: %d", 2, len(histoRes.Buckets)) - } - if histoRes.Buckets[0].DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, histoRes.Buckets[0].DocCount) - } - if histoRes.Buckets[0].Key != 0.0 { - t.Errorf("expected %v; got: %v", 0.0, histoRes.Buckets[0].Key) - } - if histoRes.Buckets[1].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount) - } - if histoRes.Buckets[1].Key != 100.0 { - t.Errorf("expected %v; got: %v", 100.0, histoRes.Buckets[1].Key) - } - - // dateHisto - dateHistoRes, found := agg.DateHistogram("dateHisto") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if dateHistoRes == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(dateHistoRes.Buckets) != 2 { - t.Fatalf("expected %d; got: %d", 2, len(dateHistoRes.Buckets)) - } - if dateHistoRes.Buckets[0].DocCount != 1 { - t.Errorf("expected %d; got: %d", 1, dateHistoRes.Buckets[0].DocCount) - } - if dateHistoRes.Buckets[0].Key != 1.29384e+12 { - t.Errorf("expected %v; got: %v", 1.29384e+12, dateHistoRes.Buckets[0].Key) - } - if dateHistoRes.Buckets[0].KeyAsString == nil { - t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[0].KeyAsString) - } - if *dateHistoRes.Buckets[0].KeyAsString != "2011-01-01T00:00:00.000Z" { - 
t.Errorf("expected %q; got: %q", "2011-01-01T00:00:00.000Z", *dateHistoRes.Buckets[0].KeyAsString) - } - if dateHistoRes.Buckets[1].DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, dateHistoRes.Buckets[1].DocCount) - } - if dateHistoRes.Buckets[1].Key != 1.325376e+12 { - t.Errorf("expected %v; got: %v", 1.325376e+12, dateHistoRes.Buckets[1].Key) - } - if dateHistoRes.Buckets[1].KeyAsString == nil { - t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[1].KeyAsString) - } - if *dateHistoRes.Buckets[1].KeyAsString != "2012-01-01T00:00:00.000Z" { - t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", *dateHistoRes.Buckets[1].KeyAsString) - } - - // topHits - topTags, found := agg.Terms("top-tags") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if topTags == nil { - t.Fatalf("expected != nil; got: nil") - } - if esversion >= "1.4.0" { - if topTags.DocCountErrorUpperBound != 0 { - t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound) - } - if topTags.SumOfOtherDocCount != 1 { - t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount) - } - } - if len(topTags.Buckets) != 3 { - t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets)) - } - if topTags.Buckets[0].DocCount != 2 { - t.Errorf("expected %d; got: %d", 2, topTags.Buckets[0].DocCount) - } - if topTags.Buckets[0].Key != "golang" { - t.Errorf("expected %v; got: %v", "golang", topTags.Buckets[0].Key) - } - topHits, found := topTags.Buckets[0].TopHits("top_tag_hits") - if !found { - t.Errorf("expected %v; got: %v", true, found) - } - if topHits == nil { - t.Fatal("expected != nil; got: nil") - } - if topHits.Hits == nil { - t.Fatalf("expected != nil; got: nil") - } - if topHits.Hits.TotalHits != 2 { - t.Errorf("expected %d; got: %d", 2, topHits.Hits.TotalHits) - } - if topHits.Hits.Hits == nil { - t.Fatalf("expected != nil; got: nil") - } - if len(topHits.Hits.Hits) != 2 { - t.Fatalf("expected %d; got: %d", 2, len(topHits.Hits.Hits)) - } - hit := topHits.Hits.Hits[0] - if !found { - t.Fatalf("expected %v; got: %v", true, found) - } - if hit == nil { - t.Fatal("expected != nil; got: nil") - } - var tw tweet - if err := json.Unmarshal(*hit.Source, &tw); err != nil { - t.Fatalf("expected no error; got: %v", err) - } - if tw.Message != "Welcome to Golang and Elasticsearch." 
-		t.Errorf("expected %q; got: %q", "Welcome to Golang and Elasticsearch.", tw.Message)
-	}
-	if topTags.Buckets[1].DocCount != 1 {
-		t.Errorf("expected %d; got: %d", 1, topTags.Buckets[1].DocCount)
-	}
-	if topTags.Buckets[1].Key != "cycling" {
-		t.Errorf("expected %v; got: %v", "cycling", topTags.Buckets[1].Key)
-	}
-	topHits, found = topTags.Buckets[1].TopHits("top_tag_hits")
-	if !found {
-		t.Errorf("expected %v; got: %v", true, found)
-	}
-	if topHits == nil {
-		t.Fatal("expected != nil; got: nil")
-	}
-	if topHits.Hits == nil {
-		t.Fatal("expected != nil; got: nil")
-	}
-	if topHits.Hits.TotalHits != 1 {
-		t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)
-	}
-	if topTags.Buckets[2].DocCount != 1 {
-		t.Errorf("expected %d; got: %d", 1, topTags.Buckets[2].DocCount)
-	}
-	if topTags.Buckets[2].Key != "elasticsearch" {
-		t.Errorf("expected %v; got: %v", "elasticsearch", topTags.Buckets[2].Key)
-	}
-	topHits, found = topTags.Buckets[2].TopHits("top_tag_hits")
-	if !found {
-		t.Errorf("expected %v; got: %v", true, found)
-	}
-	if topHits == nil {
-		t.Fatal("expected != nil; got: nil")
-	}
-	if topHits.Hits == nil {
-		t.Fatal("expected != nil; got: nil")
-	}
-	if topHits.Hits.TotalHits != 1 {
-		t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)
-	}
-
-	// viewport via geo_bounds (1.3.0 has an error in that it doesn't output the aggregation name)
-	geoBoundsRes, found := agg.GeoBounds("viewport")
-	if !found {
-		t.Errorf("expected %v; got: %v", true, found)
-	}
-	if geoBoundsRes == nil {
-		t.Fatalf("expected != nil; got: nil")
-	}
-
-	if esversion >= "1.4" {
-		// Filters agg "countByUser"
-		countByUserAggRes, found := agg.Filters("countByUser")
-		if !found {
-			t.Errorf("expected %v; got: %v", true, found)
-		}
-		if countByUserAggRes == nil {
-			t.Fatalf("expected != nil; got: nil")
-		}
-		if len(countByUserAggRes.Buckets) != 2 {
-			t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets))
-		}
-		if countByUserAggRes.Buckets[0].DocCount != 2 {
-			t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount)
-		}
-		if countByUserAggRes.Buckets[1].DocCount != 1 {
-			t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount)
-		}
-	}
-}
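The long test above reduces to one pattern: register named aggregations on the search builder, then pull typed views back off the response by name, where every accessor returns a (result, found) pair. A sketch against the same in-package v3 API used throughout this file; the function name "yearlyCounts" and the aggregation name "timeline" are illustrative only, and fmt is assumed to be imported:

// Sketch only: mirrors the builder and accessor calls exercised above.
func yearlyCounts(client *Client) (map[string]int64, error) {
	agg := NewDateHistogramAggregation().Field("created").Interval("year")
	res, err := client.Search().
		Index(testIndexName).
		Query(NewMatchAllQuery()).
		Aggregation("timeline", agg).
		Do()
	if err != nil {
		return nil, err
	}
	histo, found := res.Aggregations.DateHistogram("timeline")
	if !found {
		return nil, fmt.Errorf("no %q aggregation in response", "timeline")
	}
	counts := make(map[string]int64, len(histo.Buckets))
	for _, b := range histo.Buckets {
		if b.KeyAsString != nil {
			counts[*b.KeyAsString] = b.DocCount
		}
	}
	return counts, nil
}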
-
-// TestAggsMarshal ensures that marshaling aggregations back into a string
-// does not yield base64 encoded data. See https://github.com/olivere/elastic/issues/51
-// and https://groups.google.com/forum/#!topic/Golang-Nuts/38ShOlhxAYY for details.
-func TestAggsMarshal(t *testing.T) {
-	client := setupTestClientAndCreateIndex(t)
-
-	tweet1 := tweet{
-		User:     "olivere",
-		Retweets: 108,
-		Message:  "Welcome to Golang and Elasticsearch.",
-		Image:    "http://golang.org/doc/gopher/gophercolor.png",
-		Tags:     []string{"golang", "elasticsearch"},
-		Location: "48.1333,11.5667", // lat,lon
-		Created:  time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
-	}
-
-	// Add all documents
-	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = client.Flush().Index(testIndexName).Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Match all should return all documents
-	all := NewMatchAllQuery()
-	dhagg := NewDateHistogramAggregation().Field("created").Interval("year")
-
-	// Run query
-	builder := client.Search().Index(testIndexName).Query(all)
-	builder = builder.Aggregation("dhagg", dhagg)
-	searchResult, err := builder.Do()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if searchResult.TotalHits() != 1 {
-		t.Errorf("expected Hits.TotalHits = %d; got: %d", 1, searchResult.TotalHits())
-	}
-	if _, found := searchResult.Aggregations["dhagg"]; !found {
-		t.Fatalf("expected aggregation %q", "dhagg")
-	}
-	buf, err := json.Marshal(searchResult)
-	if err != nil {
-		t.Fatal(err)
-	}
-	s := string(buf)
-	if i := strings.Index(s, `{"dhagg":{"buckets":[{"key_as_string":"2012-01-01`); i < 0 {
-		t.Errorf("expected to serialize aggregation into string; got: %v", s)
-	}
-}
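Why the test checks for readable JSON rather than base64: encoding/json base64-encodes a []byte field when marshaling, while a json.RawMessage field is emitted verbatim, which is what makes round-tripping aggregations lossless. A standalone stdlib illustration (not taken from the test file):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte(`{"buckets":[]}`)

	// []byte fields are base64-encoded on marshal.
	asBytes, _ := json.Marshal(struct {
		Agg []byte `json:"agg"`
	}{payload})

	// json.RawMessage fields pass through untouched.
	asRaw, _ := json.Marshal(struct {
		Agg json.RawMessage `json:"agg"`
	}{payload})

	fmt.Println(string(asBytes)) // {"agg":"eyJidWNrZXRzIjpbXX0="}
	fmt.Println(string(asRaw))   // {"agg":{"buckets":[]}}
}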
"value": 75 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Avg("avg_grade") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(75) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(75), *agg.Value) - } -} - -func TestAggsMetricsValueCount(t *testing.T) { - s := `{ - "grades_count": { - "value": 10 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.ValueCount("grades_count") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(10) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value) - } -} - -func TestAggsMetricsCardinality(t *testing.T) { - s := `{ - "author_count": { - "value": 12 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Cardinality("author_count") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(12) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(12), *agg.Value) - } -} - -func TestAggsMetricsStats(t *testing.T) { - s := `{ - "grades_stats": { - "count": 6, - "min": 60, - "max": 98, - "avg": 78.5, - "sum": 471 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Stats("grades_stats") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Count != int64(6) { - t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count) - } - if agg.Min == nil { - t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min) - } - if *agg.Min != float64(60) { - t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min) - } - if agg.Max == nil { - t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max) - } - if *agg.Max != float64(98) { - t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max) - } - if agg.Avg == nil { - t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg) - } - if *agg.Avg != float64(78.5) { - t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg) - } - if agg.Sum == nil { - t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum) - } - if *agg.Sum != float64(471) { - t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum) - } -} - -func TestAggsMetricsExtendedStats(t *testing.T) { - s := `{ - "grades_stats": { - "count": 6, - "min": 72, - "max": 117.6, - "avg": 94.2, - "sum": 565.2, - "sum_of_squares": 
-
-func TestAggsMetricsStats(t *testing.T) {
-	s := `{
-	"grades_stats": {
-		"count": 6,
-		"min": 60,
-		"max": 98,
-		"avg": 78.5,
-		"sum": 471
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Stats("grades_stats")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Count != int64(6) {
-		t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)
-	}
-	if agg.Min == nil {
-		t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)
-	}
-	if *agg.Min != float64(60) {
-		t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min)
-	}
-	if agg.Max == nil {
-		t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)
-	}
-	if *agg.Max != float64(98) {
-		t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max)
-	}
-	if agg.Avg == nil {
-		t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)
-	}
-	if *agg.Avg != float64(78.5) {
-		t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg)
-	}
-	if agg.Sum == nil {
-		t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)
-	}
-	if *agg.Sum != float64(471) {
-		t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum)
-	}
-}
-
-func TestAggsMetricsExtendedStats(t *testing.T) {
-	s := `{
-	"grades_stats": {
-		"count": 6,
-		"min": 72,
-		"max": 117.6,
-		"avg": 94.2,
-		"sum": 565.2,
-		"sum_of_squares": 54551.51999999999,
-		"variance": 218.2799999999976,
-		"std_deviation": 14.774302013969987
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.ExtendedStats("grades_stats")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Count != int64(6) {
-		t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)
-	}
-	if agg.Min == nil {
-		t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)
-	}
-	if *agg.Min != float64(72) {
-		t.Fatalf("expected aggregation Min = %v; got: %v", float64(72), *agg.Min)
-	}
-	if agg.Max == nil {
-		t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)
-	}
-	if *agg.Max != float64(117.6) {
-		t.Fatalf("expected aggregation Max = %v; got: %v", float64(117.6), *agg.Max)
-	}
-	if agg.Avg == nil {
-		t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)
-	}
-	if *agg.Avg != float64(94.2) {
-		t.Fatalf("expected aggregation Avg = %v; got: %v", float64(94.2), *agg.Avg)
-	}
-	if agg.Sum == nil {
-		t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)
-	}
-	if *agg.Sum != float64(565.2) {
-		t.Fatalf("expected aggregation Sum = %v; got: %v", float64(565.2), *agg.Sum)
-	}
-	if agg.SumOfSquares == nil {
-		t.Fatalf("expected aggregation sum_of_squares != nil; got: %v", agg.SumOfSquares)
-	}
-	if *agg.SumOfSquares != float64(54551.51999999999) {
-		t.Fatalf("expected aggregation sum_of_squares = %v; got: %v", float64(54551.51999999999), *agg.SumOfSquares)
-	}
-	if agg.Variance == nil {
-		t.Fatalf("expected aggregation Variance != nil; got: %v", agg.Variance)
-	}
-	if *agg.Variance != float64(218.2799999999976) {
-		t.Fatalf("expected aggregation Variance = %v; got: %v", float64(218.2799999999976), *agg.Variance)
-	}
-	if agg.StdDeviation == nil {
-		t.Fatalf("expected aggregation StdDeviation != nil; got: %v", agg.StdDeviation)
-	}
-	if *agg.StdDeviation != float64(14.774302013969987) {
-		t.Fatalf("expected aggregation StdDeviation = %v; got: %v", float64(14.774302013969987), *agg.StdDeviation)
-	}
-}
-
-func TestAggsMetricsPercentiles(t *testing.T) {
-	s := `{
-	"load_time_outlier": {
-		"values" : {
-			"1.0": 15,
-			"5.0": 20,
-			"25.0": 23,
-			"50.0": 25,
-			"75.0": 29,
-			"95.0": 60,
-			"99.0": 150
-		}
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Percentiles("load_time_outlier")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Values == nil {
-		t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
-	}
-	if len(agg.Values) != 7 {
-		t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values))
-	}
-	if agg.Values["1.0"] != float64(15) {
-		t.Errorf("expected aggregation value for \"1.0\" = %v; got: %v", float64(15), agg.Values["1.0"])
-	}
-	if agg.Values["5.0"] != float64(20) {
-		t.Errorf("expected aggregation value for \"5.0\" = %v; got: %v", float64(20), agg.Values["5.0"])
-	}
-	if agg.Values["25.0"] != float64(23) {
-		t.Errorf("expected aggregation value for \"25.0\" = %v; got: %v", float64(23), agg.Values["25.0"])
-	}
-	if agg.Values["50.0"] != float64(25) {
-		t.Errorf("expected aggregation value for \"50.0\" = %v; got: %v", float64(25), agg.Values["50.0"])
-	}
-	if agg.Values["75.0"] != float64(29) {
-		t.Errorf("expected aggregation value for \"75.0\" = %v; got: %v", float64(29), agg.Values["75.0"])
-	}
-	if agg.Values["95.0"] != float64(60) {
-		t.Errorf("expected aggregation value for \"95.0\" = %v; got: %v", float64(60), agg.Values["95.0"])
-	}
-	if agg.Values["99.0"] != float64(150) {
-		t.Errorf("expected aggregation value for \"99.0\" = %v; got: %v", float64(150), agg.Values["99.0"])
-	}
-}
-
-func TestAggsMetricsPercentileRanks(t *testing.T) {
-	s := `{
-	"load_time_outlier": {
-		"values" : {
-			"15": 92,
-			"30": 100
-		}
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.PercentileRanks("load_time_outlier")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Values == nil {
-		t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
-	}
-	if len(agg.Values) != 2 {
-		t.Fatalf("expected %d aggregation Values; got: %d", 2, len(agg.Values))
-	}
-	if agg.Values["15"] != float64(92) {
-		t.Errorf("expected aggregation value for \"15\" = %v; got: %v", float64(92), agg.Values["15"])
-	}
-	if agg.Values["30"] != float64(100) {
-		t.Errorf("expected aggregation value for \"30\" = %v; got: %v", float64(100), agg.Values["30"])
-	}
-}
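Percentile results arrive keyed by the percentile rendered as a string ("1.0", "99.0"), so lookups must use the exact string key and any ordered traversal has to parse the keys first. A standalone stdlib sketch of iterating them in numeric order (the struct here is a local stand-in, not the library type):

package main

import (
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
)

func main() {
	var agg struct {
		Values map[string]float64 `json:"values"`
	}
	json.Unmarshal([]byte(`{"values": {"1.0": 15, "50.0": 25, "99.0": 150}}`), &agg)

	// Parse the string keys so they sort numerically, not lexically.
	ps := make([]float64, 0, len(agg.Values))
	for k := range agg.Values {
		if p, err := strconv.ParseFloat(k, 64); err == nil {
			ps = append(ps, p)
		}
	}
	sort.Float64s(ps)
	for _, p := range ps {
		key := strconv.FormatFloat(p, 'f', 1, 64) // back to "50.0" form
		fmt.Printf("p%s = %v\n", key, agg.Values[key])
	}
}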
-
-func TestAggsMetricsTopHits(t *testing.T) {
-	s := `{
-	"top-tags": {
-		"buckets": [
-			{
-				"key": "windows-7",
-				"doc_count": 25365,
-				"top_tags_hits": {
-					"hits": {
-						"total": 25365,
-						"max_score": 1,
-						"hits": [
-							{
-								"_index": "stack",
-								"_type": "question",
-								"_id": "602679",
-								"_score": 1,
-								"_source": {
-									"title": "Windows port opening"
-								},
-								"sort": [
-									1370143231177
-								]
-							}
-						]
-					}
-				}
-			},
-			{
-				"key": "linux",
-				"doc_count": 18342,
-				"top_tags_hits": {
-					"hits": {
-						"total": 18342,
-						"max_score": 1,
-						"hits": [
-							{
-								"_index": "stack",
-								"_type": "question",
-								"_id": "602672",
-								"_score": 1,
-								"_source": {
-									"title": "Ubuntu RFID Screensaver lock-unlock"
-								},
-								"sort": [
-									1370143379747
-								]
-							}
-						]
-					}
-				}
-			},
-			{
-				"key": "windows",
-				"doc_count": 18119,
-				"top_tags_hits": {
-					"hits": {
-						"total": 18119,
-						"max_score": 1,
-						"hits": [
-							{
-								"_index": "stack",
-								"_type": "question",
-								"_id": "602678",
-								"_score": 1,
-								"_source": {
-									"title": "If I change my computers date / time, what could be affected?"
-								},
-								"sort": [
-									1370142868283
-								]
-							}
-						]
-					}
-				}
-			}
-		]
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Terms("top-tags")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Buckets == nil {
-		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
-	}
-	if len(agg.Buckets) != 3 {
-		t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
-	}
-	if agg.Buckets[0].Key != "windows-7" {
-		t.Errorf("expected bucket key = %q; got: %q", "windows-7", agg.Buckets[0].Key)
-	}
-	if agg.Buckets[1].Key != "linux" {
-		t.Errorf("expected bucket key = %q; got: %q", "linux", agg.Buckets[1].Key)
-	}
-	if agg.Buckets[2].Key != "windows" {
-		t.Errorf("expected bucket key = %q; got: %q", "windows", agg.Buckets[2].Key)
-	}
-
-	// Sub-aggregation of top-hits
-	subAgg, found := agg.Buckets[0].TopHits("top_tags_hits")
-	if !found {
-		t.Fatalf("expected sub aggregation to be found; got: %v", found)
-	}
-	if subAgg == nil {
-		t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
-	}
-	if subAgg.Hits == nil {
-		t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
-	}
-	if subAgg.Hits.TotalHits != 25365 {
-		t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 25365, subAgg.Hits.TotalHits)
-	}
-	if subAgg.Hits.MaxScore == nil {
-		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
-	}
-	if *subAgg.Hits.MaxScore != float64(1.0) {
-		t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
-	}
-
-	subAgg, found = agg.Buckets[1].TopHits("top_tags_hits")
-	if !found {
-		t.Fatalf("expected sub aggregation to be found; got: %v", found)
-	}
-	if subAgg == nil {
-		t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
-	}
-	if subAgg.Hits == nil {
-		t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
-	}
-	if subAgg.Hits.TotalHits != 18342 {
-		t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18342, subAgg.Hits.TotalHits)
-	}
-	if subAgg.Hits.MaxScore == nil {
-		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
-	}
-	if *subAgg.Hits.MaxScore != float64(1.0) {
-		t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
-	}
-
-	subAgg, found = agg.Buckets[2].TopHits("top_tags_hits")
-	if !found {
-		t.Fatalf("expected sub aggregation to be found; got: %v", found)
-	}
-	if subAgg == nil {
-		t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
-	}
-	if subAgg.Hits == nil {
-		t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
-	}
-	if subAgg.Hits.TotalHits != 18119 {
-		t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18119, subAgg.Hits.TotalHits)
-	}
-	if subAgg.Hits.MaxScore == nil {
-		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, subAgg.Hits.MaxScore)
-	}
-	if *subAgg.Hits.MaxScore != float64(1.0) {
-		t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
-	}
-}
-
-func TestAggsBucketGlobal(t *testing.T) {
-	s := `{
-	"all_products" : {
-		"doc_count" : 100,
-		"avg_price" : {
-			"value" : 56.3
-		}
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Global("all_products") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.DocCount != 100 { - t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount) - } - - // Sub-aggregation - subAgg, found := agg.Avg("avg_price") - if !found { - t.Fatalf("expected sub-aggregation to be found; got: %v", found) - } - if subAgg == nil { - t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) - } - if subAgg.Value == nil { - t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) - } - if *subAgg.Value != float64(56.3) { - t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value) - } -} - -func TestAggsBucketFilter(t *testing.T) { - s := `{ - "in_stock_products" : { - "doc_count" : 100, - "avg_price" : { "value" : 56.3 } - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Filter("in_stock_products") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.DocCount != 100 { - t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount) - } - - // Sub-aggregation - subAgg, found := agg.Avg("avg_price") - if !found { - t.Fatalf("expected sub-aggregation to be found; got: %v", found) - } - if subAgg == nil { - t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) - } - if subAgg.Value == nil { - t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) - } - if *subAgg.Value != float64(56.3) { - t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value) - } -} - -func TestAggsBucketFiltersWithBuckets(t *testing.T) { - s := `{ - "messages" : { - "buckets" : [ - { - "doc_count" : 34, - "monthly" : { - "buckets" : [] - } - }, - { - "doc_count" : 439, - "monthly" : { - "buckets" : [] - } - } - ] - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Filters("messages") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Buckets == nil { - t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.Buckets) - } - if len(agg.Buckets) != 2 { - t.Fatalf("expected %d buckets; got: %d", 2, len(agg.Buckets)) - } - - if agg.Buckets[0].DocCount != 34 { - t.Fatalf("expected DocCount = %d; got: %d", 34, agg.Buckets[0].DocCount) - } - subAgg, found := agg.Buckets[0].Histogram("monthly") - if !found { - t.Fatalf("expected sub aggregation to be found; got: %v", found) - } - if subAgg == nil { - t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) - } - - if agg.Buckets[1].DocCount != 439 { - t.Fatalf("expected DocCount = %d; got: %d", 439, agg.Buckets[1].DocCount) - } - subAgg, found = agg.Buckets[1].Histogram("monthly") - if !found { - t.Fatalf("expected sub aggregation to be found; got: %v", found) - } - if subAgg == nil { - t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) - } -} - -func TestAggsBucketFiltersWithNamedBuckets(t *testing.T) { - s := `{ - "messages" : { - "buckets" : { - "errors" : 
{ - "doc_count" : 34, - "monthly" : { - "buckets" : [] - } - }, - "warnings" : { - "doc_count" : 439, - "monthly" : { - "buckets" : [] - } - } - } - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Filters("messages") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.NamedBuckets == nil { - t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.NamedBuckets) - } - if len(agg.NamedBuckets) != 2 { - t.Fatalf("expected %d buckets; got: %d", 2, len(agg.NamedBuckets)) - } - - if agg.NamedBuckets["errors"].DocCount != 34 { - t.Fatalf("expected DocCount = %d; got: %d", 34, agg.NamedBuckets["errors"].DocCount) - } - subAgg, found := agg.NamedBuckets["errors"].Histogram("monthly") - if !found { - t.Fatalf("expected sub aggregation to be found; got: %v", found) - } - if subAgg == nil { - t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) - } - - if agg.NamedBuckets["warnings"].DocCount != 439 { - t.Fatalf("expected DocCount = %d; got: %d", 439, agg.NamedBuckets["warnings"].DocCount) - } - subAgg, found = agg.NamedBuckets["warnings"].Histogram("monthly") - if !found { - t.Fatalf("expected sub aggregation to be found; got: %v", found) - } - if subAgg == nil { - t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg) - } -} - -func TestAggsBucketMissing(t *testing.T) { - s := `{ - "products_without_a_price" : { - "doc_count" : 10 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Missing("products_without_a_price") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.DocCount != 10 { - t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount) - } -} - -func TestAggsBucketNested(t *testing.T) { - s := `{ - "resellers": { - "min_price": { - "value" : 350 - } - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Nested("resellers") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.DocCount != 0 { - t.Fatalf("expected aggregation DocCount = %d; got: %d", 0, agg.DocCount) - } - - // Sub-aggregation - subAgg, found := agg.Avg("min_price") - if !found { - t.Fatalf("expected sub-aggregation to be found; got: %v", found) - } - if subAgg == nil { - t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg) - } - if subAgg.Value == nil { - t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value) - } - if *subAgg.Value != float64(350) { - t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(350), *subAgg.Value) - } -} - -func TestAggsBucketReverseNested(t *testing.T) { - s := `{ - "comment_to_issue": { - "doc_count" : 10 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.ReverseNested("comment_to_issue") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if 
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.DocCount != 10 {
-		t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
-	}
-}
-
-func TestAggsBucketChildren(t *testing.T) {
-	s := `{
-	"to-answers": {
-		"doc_count" : 10
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Children("to-answers")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.DocCount != 10 {
-		t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
-	}
-}
-
-func TestAggsBucketTerms(t *testing.T) {
-	s := `{
-	"users" : {
-		"doc_count_error_upper_bound" : 1,
-		"sum_other_doc_count" : 2,
-		"buckets" : [ {
-			"key" : "olivere",
-			"doc_count" : 2
-		}, {
-			"key" : "sandrae",
-			"doc_count" : 1
-		} ]
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Terms("users")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Buckets == nil {
-		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
-	}
-	if len(agg.Buckets) != 2 {
-		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
-	}
-	if agg.Buckets[0].Key != "olivere" {
-		t.Errorf("expected key %q; got: %q", "olivere", agg.Buckets[0].Key)
-	}
-	if agg.Buckets[0].DocCount != 2 {
-		t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
-	}
-	if agg.Buckets[1].Key != "sandrae" {
-		t.Errorf("expected key %q; got: %q", "sandrae", agg.Buckets[1].Key)
-	}
-	if agg.Buckets[1].DocCount != 1 {
-		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
-	}
-}
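Terms bucket keys are not always strings: the numeric-keys test below gets float64 back from a plain decode, and the KeyNumber.Int64() calls it makes only stay exact if numbers are decoded as json.Number. A standalone stdlib sketch of the difference (local types, not the library's):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	s := `{"buckets": [{"key": 17, "doc_count": 2}]}`

	// Default decoding turns every JSON number into float64.
	var loose struct {
		Buckets []struct {
			Key interface{} `json:"key"`
		} `json:"buckets"`
	}
	json.Unmarshal([]byte(s), &loose)
	fmt.Printf("%T\n", loose.Buckets[0].Key) // float64

	// UseNumber keeps the digits, so 64-bit keys survive intact.
	dec := json.NewDecoder(bytes.NewReader([]byte(s)))
	dec.UseNumber()
	var exact struct {
		Buckets []struct {
			Key interface{} `json:"key"`
		} `json:"buckets"`
	}
	dec.Decode(&exact)
	n, _ := exact.Buckets[0].Key.(json.Number).Int64()
	fmt.Println(n) // 17
}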
-
-func TestAggsBucketTermsWithNumericKeys(t *testing.T) {
-	s := `{
-	"users" : {
-		"doc_count_error_upper_bound" : 1,
-		"sum_other_doc_count" : 2,
-		"buckets" : [ {
-			"key" : 17,
-			"doc_count" : 2
-		}, {
-			"key" : 21,
-			"doc_count" : 1
-		} ]
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Terms("users")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Buckets == nil {
-		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
-	}
-	if len(agg.Buckets) != 2 {
-		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
-	}
-	if agg.Buckets[0].Key != float64(17) {
-		t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)
-	}
-	if got, err := agg.Buckets[0].KeyNumber.Int64(); err != nil {
-		t.Errorf("expected to convert key to int64; got: %v", err)
-	} else if got != 17 {
-		t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)
-	}
-	if agg.Buckets[0].DocCount != 2 {
-		t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
-	}
-	if agg.Buckets[1].Key != float64(21) {
-		t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)
-	}
-	if got, err := agg.Buckets[1].KeyNumber.Int64(); err != nil {
-		t.Errorf("expected to convert key to int64; got: %v", err)
-	} else if got != 21 {
-		t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)
-	}
-	if agg.Buckets[1].DocCount != 1 {
-		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
-	}
-}
-
-func TestAggsBucketTermsWithBoolKeys(t *testing.T) {
-	s := `{
-	"users" : {
-		"doc_count_error_upper_bound" : 1,
-		"sum_other_doc_count" : 2,
-		"buckets" : [ {
-			"key" : true,
-			"doc_count" : 2
-		}, {
-			"key" : false,
-			"doc_count" : 1
-		} ]
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Terms("users")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Buckets == nil {
-		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
-	}
-	if len(agg.Buckets) != 2 {
-		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
-	}
-	if agg.Buckets[0].Key != true {
-		t.Errorf("expected key %v; got: %v", true, agg.Buckets[0].Key)
-	}
-	if agg.Buckets[0].DocCount != 2 {
-		t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
-	}
-	if agg.Buckets[1].Key != false {
-		t.Errorf("expected key %v; got: %v", false, agg.Buckets[1].Key)
-	}
-	if agg.Buckets[1].DocCount != 1 {
-		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
-	}
-}
-
-func TestAggsBucketSignificantTerms(t *testing.T) {
-	s := `{
-	"significantCrimeTypes" : {
-		"doc_count": 47347,
-		"buckets" : [
-			{
-				"key": "Bicycle theft",
-				"doc_count": 3640,
-				"score": 0.371235374214817,
-				"bg_count": 66799
-			}
-		]
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.SignificantTerms("significantCrimeTypes")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.DocCount != 47347 {
-		t.Fatalf("expected aggregation DocCount = %d; got: %d", 47347, agg.DocCount)
-	}
-	if agg.Buckets == nil {
-		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
-	}
-	if len(agg.Buckets) != 1 {
-		t.Errorf("expected %d bucket entries; got: %d", 1, len(agg.Buckets))
-	}
-	if agg.Buckets[0].Key != "Bicycle theft" {
-		t.Errorf("expected key = %q; got: %q", "Bicycle theft", agg.Buckets[0].Key)
-	}
-	if agg.Buckets[0].DocCount != 3640 {
-		t.Errorf("expected doc count = %d; got: %d", 3640, agg.Buckets[0].DocCount)
-	}
-	if agg.Buckets[0].Score != float64(0.371235374214817) {
-		t.Errorf("expected score = %v; got: %v", float64(0.371235374214817), agg.Buckets[0].Score)
-	}
-	if agg.Buckets[0].BgCount != 66799 {
-		t.Errorf("expected BgCount = %d; got: %d", 66799, agg.Buckets[0].BgCount)
-	}
-}
-
-func TestAggsBucketSampler(t *testing.T) {
-	s := `{
-	"sample" : {
-		"doc_count": 1000,
-		"keywords": {
-			"doc_count": 1000,
-			"buckets" : [
-				{
-					"key": "bend",
-					"doc_count": 58,
-					"score": 37.982536582524276,
-					"bg_count": 103
-				}
-			]
-		}
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Sampler("sample")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.DocCount != 1000 {
-		t.Fatalf("expected aggregation DocCount = %d; got: %d", 1000, agg.DocCount)
-	}
-	sub, found := agg.Aggregations["keywords"]
-	if !found {
-		t.Fatalf("expected sub aggregation %q", "keywords")
-	}
-	if sub == nil {
-		t.Fatalf("expected sub aggregation %q; got: %v", "keywords", sub)
-	}
-}
-
-func TestAggsBucketRange(t *testing.T) {
-	s := `{
-	"price_ranges" : {
-		"buckets": [
-			{
-				"to": 50,
-				"doc_count": 2
-			},
-			{
-				"from": 50,
-				"to": 100,
-				"doc_count": 4
-			},
-			{
-				"from": 100,
-				"doc_count": 4
-			}
-		]
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.Range("price_ranges")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Buckets == nil {
-		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
-	}
-	if len(agg.Buckets) != 3 {
-		t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
-	}
-	if agg.Buckets[0].From != nil {
-		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
-	}
-	if agg.Buckets[0].To == nil {
-		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
-	}
-	if *agg.Buckets[0].To != float64(50) {
-		t.Errorf("expected To = %v; got: %v", float64(50), *agg.Buckets[0].To)
-	}
-	if agg.Buckets[0].DocCount != 2 {
-		t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[0].DocCount)
-	}
-	if agg.Buckets[1].From == nil {
-		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
-	}
-	if *agg.Buckets[1].From != float64(50) {
-		t.Errorf("expected From = %v; got: %v", float64(50), *agg.Buckets[1].From)
-	}
-	if agg.Buckets[1].To == nil {
-		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
-	}
-	if *agg.Buckets[1].To != float64(100) {
-		t.Errorf("expected To = %v; got: %v", float64(100), *agg.Buckets[1].To)
-	}
-	if agg.Buckets[1].DocCount != 4 {
-		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[1].DocCount)
-	}
-	if agg.Buckets[2].From == nil {
-		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
-	}
-	if *agg.Buckets[2].From != float64(100) {
-		t.Errorf("expected From = %v; got: %v", float64(100), *agg.Buckets[2].From)
-	}
-	if agg.Buckets[2].To != nil {
-		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
-	}
-	if agg.Buckets[2].DocCount != 4 {
-		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[2].DocCount)
-	}
-}
-
-func TestAggsBucketDateRange(t *testing.T) {
-	s := `{
-	"range": {
-		"buckets": [
-			{
-				"to": 1.3437792E+12,
-				"to_as_string": "08-2012",
-				"doc_count": 7
-			},
-			{
-				"from": 1.3437792E+12,
-				"from_as_string": "08-2012",
-				"doc_count": 2
-			}
-		]
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(s), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	agg, found := aggs.DateRange("range")
-	if !found {
-		t.Fatalf("expected aggregation to be found; got: %v", found)
-	}
-	if agg == nil {
-		t.Fatalf("expected aggregation != nil; got: %v", agg)
-	}
-	if agg.Buckets == nil {
-		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
-	}
-	if len(agg.Buckets) != 2 {
-		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
-	}
-	if agg.Buckets[0].From != nil {
-		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
-	}
-	if agg.Buckets[0].To == nil {
-		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
-	}
-	if *agg.Buckets[0].To != float64(1.3437792E+12) {
t.Errorf("expected To = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[0].To) - } - if agg.Buckets[0].ToAsString != "08-2012" { - t.Errorf("expected ToAsString = %q; got: %q", "08-2012", agg.Buckets[0].ToAsString) - } - if agg.Buckets[0].DocCount != 7 { - t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[0].DocCount) - } - if agg.Buckets[1].From == nil { - t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) - } - if *agg.Buckets[1].From != float64(1.3437792E+12) { - t.Errorf("expected From = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[1].From) - } - if agg.Buckets[1].FromAsString != "08-2012" { - t.Errorf("expected FromAsString = %q; got: %q", "08-2012", agg.Buckets[1].FromAsString) - } - if agg.Buckets[1].To != nil { - t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To) - } - if agg.Buckets[1].DocCount != 2 { - t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[1].DocCount) - } -} - -func TestAggsBucketIPv4Range(t *testing.T) { - s := `{ - "ip_ranges": { - "buckets" : [ - { - "to": 167772165, - "to_as_string": "10.0.0.5", - "doc_count": 4 - }, - { - "from": 167772165, - "from_as_string": "10.0.0.5", - "doc_count": 6 - } - ] - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.IPv4Range("ip_ranges") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Buckets == nil { - t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) - } - if len(agg.Buckets) != 2 { - t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) - } - if agg.Buckets[0].From != nil { - t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) - } - if agg.Buckets[0].To == nil { - t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) - } - if *agg.Buckets[0].To != float64(167772165) { - t.Errorf("expected To = %v; got: %v", float64(167772165), *agg.Buckets[0].To) - } - if agg.Buckets[0].ToAsString != "10.0.0.5" { - t.Errorf("expected ToAsString = %q; got: %q", "10.0.0.5", agg.Buckets[0].ToAsString) - } - if agg.Buckets[0].DocCount != 4 { - t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount) - } - if agg.Buckets[1].From == nil { - t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) - } - if *agg.Buckets[1].From != float64(167772165) { - t.Errorf("expected From = %v; got: %v", float64(167772165), *agg.Buckets[1].From) - } - if agg.Buckets[1].FromAsString != "10.0.0.5" { - t.Errorf("expected FromAsString = %q; got: %q", "10.0.0.5", agg.Buckets[1].FromAsString) - } - if agg.Buckets[1].To != nil { - t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To) - } - if agg.Buckets[1].DocCount != 6 { - t.Errorf("expected DocCount = %d; got: %d", 6, agg.Buckets[1].DocCount) - } -} - -func TestAggsBucketHistogram(t *testing.T) { - s := `{ - "prices" : { - "buckets": [ - { - "key": 0, - "doc_count": 2 - }, - { - "key": 50, - "doc_count": 4 - }, - { - "key": 150, - "doc_count": 3 - } - ] - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Histogram("prices") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Buckets == nil { 
- t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) - } - if len(agg.Buckets) != 3 { - t.Errorf("expected %d buckets; got: %d", 3, len(agg.Buckets)) - } - if agg.Buckets[0].Key != 0 { - t.Errorf("expected key = %v; got: %v", 0, agg.Buckets[0].Key) - } - if agg.Buckets[0].KeyAsString != nil { - t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[0].KeyAsString) - } - if agg.Buckets[0].DocCount != 2 { - t.Errorf("expected doc count = %d; got: %d", 2, agg.Buckets[0].DocCount) - } - if agg.Buckets[1].Key != 50 { - t.Errorf("expected key = %v; got: %v", 50, agg.Buckets[1].Key) - } - if agg.Buckets[1].KeyAsString != nil { - t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[1].KeyAsString) - } - if agg.Buckets[1].DocCount != 4 { - t.Errorf("expected doc count = %d; got: %d", 4, agg.Buckets[1].DocCount) - } - if agg.Buckets[2].Key != 150 { - t.Errorf("expected key = %v; got: %v", 150, agg.Buckets[2].Key) - } - if agg.Buckets[2].KeyAsString != nil { - t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[2].KeyAsString) - } - if agg.Buckets[2].DocCount != 3 { - t.Errorf("expected doc count = %d; got: %d", 3, agg.Buckets[2].DocCount) - } -} - -func TestAggsBucketDateHistogram(t *testing.T) { - s := `{ - "articles_over_time": { - "buckets": [ - { - "key_as_string": "2013-02-02", - "key": 1328140800000, - "doc_count": 1 - }, - { - "key_as_string": "2013-03-02", - "key": 1330646400000, - "doc_count": 2 - } - ] - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.DateHistogram("articles_over_time") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Buckets == nil { - t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) - } - if len(agg.Buckets) != 2 { - t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) - } - if agg.Buckets[0].Key != 1328140800000 { - t.Errorf("expected key %v; got: %v", 1328140800000, agg.Buckets[0].Key) - } - if agg.Buckets[0].KeyAsString == nil { - t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[0].KeyAsString) - } - if *agg.Buckets[0].KeyAsString != "2013-02-02" { - t.Errorf("expected key_as_string %q; got: %q", "2013-02-02", *agg.Buckets[0].KeyAsString) - } - if agg.Buckets[0].DocCount != 1 { - t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[0].DocCount) - } - if agg.Buckets[1].Key != 1330646400000 { - t.Errorf("expected key %v; got: %v", 1330646400000, agg.Buckets[1].Key) - } - if agg.Buckets[1].KeyAsString == nil { - t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[1].KeyAsString) - } - if *agg.Buckets[1].KeyAsString != "2013-03-02" { - t.Errorf("expected key_as_string %q; got: %q", "2013-03-02", *agg.Buckets[1].KeyAsString) - } - if agg.Buckets[1].DocCount != 2 { - t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[1].DocCount) - } -} - -func TestAggsMetricsGeoBounds(t *testing.T) { - s := `{ - "viewport": { - "bounds": { - "top_left": { - "lat": 80.45, - "lon": -160.22 - }, - "bottom_right": { - "lat": 40.65, - "lon": 42.57 - } - } - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.GeoBounds("viewport") - if !found { - t.Fatalf("expected aggregation to be found; got: 
%v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Bounds.TopLeft.Latitude != float64(80.45) { - t.Fatalf("expected Bounds.TopLeft.Latitude != %v; got: %v", float64(80.45), agg.Bounds.TopLeft.Latitude) - } - if agg.Bounds.TopLeft.Longitude != float64(-160.22) { - t.Fatalf("expected Bounds.TopLeft.Longitude != %v; got: %v", float64(-160.22), agg.Bounds.TopLeft.Longitude) - } - if agg.Bounds.BottomRight.Latitude != float64(40.65) { - t.Fatalf("expected Bounds.BottomRight.Latitude != %v; got: %v", float64(40.65), agg.Bounds.BottomRight.Latitude) - } - if agg.Bounds.BottomRight.Longitude != float64(42.57) { - t.Fatalf("expected Bounds.BottomRight.Longitude != %v; got: %v", float64(42.57), agg.Bounds.BottomRight.Longitude) - } -} - -func TestAggsBucketGeoHash(t *testing.T) { - s := `{ - "myLarge-GrainGeoHashGrid": { - "buckets": [ - { - "key": "svz", - "doc_count": 10964 - }, - { - "key": "sv8", - "doc_count": 3198 - } - ] - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.GeoHash("myLarge-GrainGeoHashGrid") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Buckets == nil { - t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) - } - if len(agg.Buckets) != 2 { - t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets)) - } - if agg.Buckets[0].Key != "svz" { - t.Errorf("expected key %q; got: %q", "svz", agg.Buckets[0].Key) - } - if agg.Buckets[0].DocCount != 10964 { - t.Errorf("expected doc count %d; got: %d", 10964, agg.Buckets[0].DocCount) - } - if agg.Buckets[1].Key != "sv8" { - t.Errorf("expected key %q; got: %q", "sv8", agg.Buckets[1].Key) - } - if agg.Buckets[1].DocCount != 3198 { - t.Errorf("expected doc count %d; got: %d", 3198, agg.Buckets[1].DocCount) - } -} - -func TestAggsBucketGeoDistance(t *testing.T) { - s := `{ - "rings" : { - "buckets": [ - { - "unit": "km", - "to": 100.0, - "doc_count": 3 - }, - { - "unit": "km", - "from": 100.0, - "to": 300.0, - "doc_count": 1 - }, - { - "unit": "km", - "from": 300.0, - "doc_count": 7 - } - ] - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.GeoDistance("rings") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Buckets == nil { - t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets) - } - if len(agg.Buckets) != 3 { - t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets)) - } - if agg.Buckets[0].From != nil { - t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From) - } - if agg.Buckets[0].To == nil { - t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To) - } - if *agg.Buckets[0].To != float64(100.0) { - t.Errorf("expected To = %v; got: %v", float64(100.0), *agg.Buckets[0].To) - } - if agg.Buckets[0].DocCount != 3 { - t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount) - } - - if agg.Buckets[1].From == nil { - t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From) - } - if *agg.Buckets[1].From != float64(100.0) { - t.Errorf("expected From = %v; got: %v", float64(100.0), *agg.Buckets[1].From) - } - if 
-	if agg.Buckets[1].To == nil {
-		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
-	}
-	if *agg.Buckets[1].To != float64(300.0) {
-		t.Errorf("expected To = %v; got: %v", float64(300.0), *agg.Buckets[1].To)
-	}
-	if agg.Buckets[1].DocCount != 1 {
-		t.Errorf("expected DocCount = %d; got: %d", 1, agg.Buckets[1].DocCount)
-	}
-
-	if agg.Buckets[2].From == nil {
-		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
-	}
-	if *agg.Buckets[2].From != float64(300.0) {
-		t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[2].From)
-	}
-	if agg.Buckets[2].To != nil {
-		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
-	}
-	if agg.Buckets[2].DocCount != 7 {
-		t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[2].DocCount)
-	}
-}
-
-func TestAggsSubAggregates(t *testing.T) {
-	rs := `{
-	"users" : {
-		"doc_count_error_upper_bound" : 1,
-		"sum_other_doc_count" : 2,
-		"buckets" : [ {
-			"key" : "olivere",
-			"doc_count" : 2,
-			"ts" : {
-				"buckets" : [ {
-					"key_as_string" : "2012-01-01T00:00:00.000Z",
-					"key" : 1325376000000,
-					"doc_count" : 2
-				} ]
-			}
-		}, {
-			"key" : "sandrae",
-			"doc_count" : 1,
-			"ts" : {
-				"buckets" : [ {
-					"key_as_string" : "2011-01-01T00:00:00.000Z",
-					"key" : 1293840000000,
-					"doc_count" : 1
-				} ]
-			}
-		} ]
-	}
-}`
-
-	aggs := new(Aggregations)
-	err := json.Unmarshal([]byte(rs), &aggs)
-	if err != nil {
-		t.Fatalf("expected no error decoding; got: %v", err)
-	}
-
-	// Access top-level aggregation
-	users, found := aggs.Terms("users")
-	if !found {
-		t.Fatalf("expected users aggregation to be found; got: %v", found)
-	}
-	if users == nil {
-		t.Fatalf("expected users aggregation; got: %v", users)
-	}
-	if users.Buckets == nil {
-		t.Fatalf("expected users buckets; got: %v", users.Buckets)
-	}
-	if len(users.Buckets) != 2 {
-		t.Errorf("expected %d bucket entries; got: %d", 2, len(users.Buckets))
-	}
-	if users.Buckets[0].Key != "olivere" {
-		t.Errorf("expected key %q; got: %q", "olivere", users.Buckets[0].Key)
-	}
-	if users.Buckets[0].DocCount != 2 {
-		t.Errorf("expected doc count %d; got: %d", 2, users.Buckets[0].DocCount)
-	}
-	if users.Buckets[1].Key != "sandrae" {
-		t.Errorf("expected key %q; got: %q", "sandrae", users.Buckets[1].Key)
-	}
-	if users.Buckets[1].DocCount != 1 {
-		t.Errorf("expected doc count %d; got: %d", 1, users.Buckets[1].DocCount)
-	}
-
-	// Access sub-aggregation
-	ts, found := users.Buckets[0].DateHistogram("ts")
-	if !found {
-		t.Fatalf("expected ts aggregation to be found; got: %v", found)
-	}
-	if ts == nil {
-		t.Fatalf("expected ts aggregation; got: %v", ts)
-	}
-	if ts.Buckets == nil {
-		t.Fatalf("expected ts buckets; got: %v", ts.Buckets)
-	}
-	if len(ts.Buckets) != 1 {
-		t.Errorf("expected %d bucket entries; got: %d", 1, len(ts.Buckets))
-	}
-	if ts.Buckets[0].Key != 1325376000000 {
-		t.Errorf("expected key %v; got: %v", 1325376000000, ts.Buckets[0].Key)
-	}
-	if ts.Buckets[0].KeyAsString == nil {
-		t.Fatalf("expected key_as_string != %v; got: %v", nil, ts.Buckets[0].KeyAsString)
-	}
-	if *ts.Buckets[0].KeyAsString != "2012-01-01T00:00:00.000Z" {
-		t.Errorf("expected key_as_string %q; got: %q", "2012-01-01T00:00:00.000Z", *ts.Buckets[0].KeyAsString)
-	}
-}
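What makes the sub-aggregation access above work is that a bucket carries its sub-aggregations as extra JSON keys alongside its own fields, so a bucket can be decoded as its known fields plus a map of raw messages to descend into. A standalone stdlib sketch of that two-pass decode (local types, not the library's):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	s := `{"key": "olivere", "doc_count": 2,
	       "ts": {"buckets": [{"key": 1325376000000, "doc_count": 2}]}}`

	// First pass: the bucket's own fields.
	var bucket struct {
		Key      string `json:"key"`
		DocCount int64  `json:"doc_count"`
	}
	json.Unmarshal([]byte(s), &bucket)

	// Second pass: every remaining key is a candidate sub-aggregation,
	// kept raw until someone asks for it by name.
	var subs map[string]json.RawMessage
	json.Unmarshal([]byte(s), &subs)
	var ts struct {
		Buckets []struct {
			Key      float64 `json:"key"`
			DocCount int64   `json:"doc_count"`
		} `json:"buckets"`
	}
	json.Unmarshal(subs["ts"], &ts)
	fmt.Println(bucket.Key, bucket.DocCount, ts.Buckets[0].DocCount) // olivere 2 2
}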
!found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(328.33333333333333) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(328.33333333333333), *agg.Value) - } -} - -func TestAggsPipelineSumBucket(t *testing.T) { - s := `{ - "sum_monthly_sales" : { - "value" : 985 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.SumBucket("sum_monthly_sales") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(985) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(985), *agg.Value) - } -} - -func TestAggsPipelineMaxBucket(t *testing.T) { - s := `{ - "max_monthly_sales" : { - "keys": ["2015/01/01 00:00:00"], - "value" : 550 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.MaxBucket("max_monthly_sales") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if len(agg.Keys) != 1 { - t.Fatalf("expected 1 key; got: %d", len(agg.Keys)) - } - if got, want := agg.Keys[0], "2015/01/01 00:00:00"; got != want { - t.Fatalf("expected key %q; got: %v (%T)", want, got, got) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(550) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value) - } -} - -func TestAggsPipelineMinBucket(t *testing.T) { - s := `{ - "min_monthly_sales" : { - "keys": ["2015/02/01 00:00:00"], - "value" : 60 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.MinBucket("min_monthly_sales") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if len(agg.Keys) != 1 { - t.Fatalf("expected 1 key; got: %d", len(agg.Keys)) - } - if got, want := agg.Keys[0], "2015/02/01 00:00:00"; got != want { - t.Fatalf("expected key %q; got: %v (%T)", want, got, got) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(60) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(60), *agg.Value) - } -} - -func TestAggsPipelineMovAvg(t *testing.T) { - s := `{ - "the_movavg" : { - "value" : 12.0 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.MovAvg("the_movavg") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(12.0) { - 
t.Fatalf("expected aggregation value = %v; got: %v", float64(12.0), *agg.Value) - } -} - -func TestAggsPipelineDerivative(t *testing.T) { - s := `{ - "sales_deriv" : { - "value" : 315 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.Derivative("sales_deriv") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(315) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(315), *agg.Value) - } -} - -func TestAggsPipelineCumulativeSum(t *testing.T) { - s := `{ - "cumulative_sales" : { - "value" : 550 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.CumulativeSum("cumulative_sales") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(550) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value) - } -} - -func TestAggsPipelineBucketScript(t *testing.T) { - s := `{ - "t-shirt-percentage" : { - "value" : 20 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.BucketScript("t-shirt-percentage") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(20) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value) - } -} - -func TestAggsPipelineSerialDiff(t *testing.T) { - s := `{ - "the_diff" : { - "value" : -722.0 - } -}` - - aggs := new(Aggregations) - err := json.Unmarshal([]byte(s), &aggs) - if err != nil { - t.Fatalf("expected no error decoding; got: %v", err) - } - - agg, found := aggs.SerialDiff("the_diff") - if !found { - t.Fatalf("expected aggregation to be found; got: %v", found) - } - if agg == nil { - t.Fatalf("expected aggregation != nil; got: %v", agg) - } - if agg.Value == nil { - t.Fatalf("expected aggregation value != nil; got: %v", agg.Value) - } - if *agg.Value != float64(-722.0) { - t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool.go deleted file mode 100644 index c2cc8697b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "fmt" - -// A bool query matches documents matching boolean -// combinations of other queries. 
-// For more details, see: -// http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html -type BoolQuery struct { - Query - mustClauses []Query - mustNotClauses []Query - filterClauses []Query - shouldClauses []Query - boost *float64 - disableCoord *bool - minimumShouldMatch string - adjustPureNegative *bool - queryName string -} - -// Creates a new bool query. -func NewBoolQuery() *BoolQuery { - return &BoolQuery{ - mustClauses: make([]Query, 0), - mustNotClauses: make([]Query, 0), - filterClauses: make([]Query, 0), - shouldClauses: make([]Query, 0), - } -} - -func (q *BoolQuery) Must(queries ...Query) *BoolQuery { - q.mustClauses = append(q.mustClauses, queries...) - return q -} - -func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery { - q.mustNotClauses = append(q.mustNotClauses, queries...) - return q -} - -func (q *BoolQuery) Filter(filters ...Query) *BoolQuery { - q.filterClauses = append(q.filterClauses, filters...) - return q -} - -func (q *BoolQuery) Should(queries ...Query) *BoolQuery { - q.shouldClauses = append(q.shouldClauses, queries...) - return q -} - -func (q *BoolQuery) Boost(boost float64) *BoolQuery { - q.boost = &boost - return q -} - -func (q *BoolQuery) DisableCoord(disableCoord bool) *BoolQuery { - q.disableCoord = &disableCoord - return q -} - -func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery { - q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch) - return q -} - -func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery { - q.adjustPureNegative = &adjustPureNegative - return q -} - -func (q *BoolQuery) QueryName(queryName string) *BoolQuery { - q.queryName = queryName - return q -} - -// Creates the query source for the bool query. -func (q *BoolQuery) Source() (interface{}, error) { - // { - // "bool" : { - // "must" : { - // "term" : { "user" : "kimchy" } - // }, - // "must_not" : { - // "range" : { - // "age" : { "from" : 10, "to" : 20 } - // } - // }, - // "filter" : [ - // ... 
- // ] - // "should" : [ - // { - // "term" : { "tag" : "wow" } - // }, - // { - // "term" : { "tag" : "elasticsearch" } - // } - // ], - // "minimum_number_should_match" : 1, - // "boost" : 1.0 - // } - // } - - query := make(map[string]interface{}) - - boolClause := make(map[string]interface{}) - query["bool"] = boolClause - - // must - if len(q.mustClauses) == 1 { - src, err := q.mustClauses[0].Source() - if err != nil { - return nil, err - } - boolClause["must"] = src - } else if len(q.mustClauses) > 1 { - clauses := make([]interface{}, 0) - for _, subQuery := range q.mustClauses { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - boolClause["must"] = clauses - } - - // must_not - if len(q.mustNotClauses) == 1 { - src, err := q.mustNotClauses[0].Source() - if err != nil { - return nil, err - } - boolClause["must_not"] = src - } else if len(q.mustNotClauses) > 1 { - clauses := make([]interface{}, 0) - for _, subQuery := range q.mustNotClauses { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - boolClause["must_not"] = clauses - } - - // filter - if len(q.filterClauses) == 1 { - src, err := q.filterClauses[0].Source() - if err != nil { - return nil, err - } - boolClause["filter"] = src - } else if len(q.filterClauses) > 1 { - clauses := make([]interface{}, 0) - for _, subQuery := range q.filterClauses { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - boolClause["filter"] = clauses - } - - // should - if len(q.shouldClauses) == 1 { - src, err := q.shouldClauses[0].Source() - if err != nil { - return nil, err - } - boolClause["should"] = src - } else if len(q.shouldClauses) > 1 { - clauses := make([]interface{}, 0) - for _, subQuery := range q.shouldClauses { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - boolClause["should"] = clauses - } - - if q.boost != nil { - boolClause["boost"] = *q.boost - } - if q.disableCoord != nil { - boolClause["disable_coord"] = *q.disableCoord - } - if q.minimumShouldMatch != "" { - boolClause["minimum_should_match"] = q.minimumShouldMatch - } - if q.adjustPureNegative != nil { - boolClause["adjust_pure_negative"] = *q.adjustPureNegative - } - if q.queryName != "" { - boolClause["_name"] = q.queryName - } - - return query, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool_test.go deleted file mode 100644 index 327d3f635..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_bool_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestBoolQuery(t *testing.T) { - q := NewBoolQuery() - q = q.Must(NewTermQuery("tag", "wow")) - q = q.MustNot(NewRangeQuery("age").From(10).To(20)) - q = q.Filter(NewTermQuery("account", "1")) - q = q.Should(NewTermQuery("tag", "sometag"), NewTermQuery("tag", "sometagtag")) - q = q.Boost(10) - q = q.DisableCoord(true) - q = q.QueryName("Test") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"bool":{"_name":"Test","boost":10,"disable_coord":true,"filter":{"term":{"account":"1"}},"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting.go deleted file mode 100644 index 7f7a53b8b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// A boosting query can be used to effectively -// demote results that match a given query. -// For more details, see: -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-boosting-query.html -type BoostingQuery struct { - Query - positiveClause Query - negativeClause Query - negativeBoost *float64 - boost *float64 -} - -// Creates a new boosting query. -func NewBoostingQuery() *BoostingQuery { - return &BoostingQuery{} -} - -func (q *BoostingQuery) Positive(positive Query) *BoostingQuery { - q.positiveClause = positive - return q -} - -func (q *BoostingQuery) Negative(negative Query) *BoostingQuery { - q.negativeClause = negative - return q -} - -func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery { - q.negativeBoost = &negativeBoost - return q -} - -func (q *BoostingQuery) Boost(boost float64) *BoostingQuery { - q.boost = &boost - return q -} - -// Creates the query source for the boosting query. -func (q *BoostingQuery) Source() (interface{}, error) { - // { - // "boosting" : { - // "positive" : { - // "term" : { - // "field1" : "value1" - // } - // }, - // "negative" : { - // "term" : { - // "field2" : "value2" - // } - // }, - // "negative_boost" : 0.2 - // } - // } - - query := make(map[string]interface{}) - - boostingClause := make(map[string]interface{}) - query["boosting"] = boostingClause - - // Negative and positive clause as well as negative boost - // are mandatory in the Java client. 
- - // positive - if q.positiveClause != nil { - src, err := q.positiveClause.Source() - if err != nil { - return nil, err - } - boostingClause["positive"] = src - } - - // negative - if q.negativeClause != nil { - src, err := q.negativeClause.Source() - if err != nil { - return nil, err - } - boostingClause["negative"] = src - } - - if q.negativeBoost != nil { - boostingClause["negative_boost"] = *q.negativeBoost - } - - if q.boost != nil { - boostingClause["boost"] = *q.boost - } - - return query, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting_test.go deleted file mode 100644 index 0ef03dfef..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_boosting_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestBoostingQuery(t *testing.T) { - q := NewBoostingQuery() - q = q.Positive(NewTermQuery("tag", "wow")) - q = q.Negative(NewRangeQuery("age").From(10).To(20)) - q = q.NegativeBoost(0.2) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"boosting":{"negative":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"negative_boost":0.2,"positive":{"term":{"tag":"wow"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms.go deleted file mode 100644 index d45825067..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CommonTermsQuery is a modern alternative to stopwords -// which improves the precision and recall of search results -// (by taking stopwords into account), without sacrificing performance. -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-common-terms-query.html -type CommonTermsQuery struct { - Query - name string - text interface{} - cutoffFreq *float64 - highFreq *float64 - highFreqOp string - highFreqMinimumShouldMatch string - lowFreq *float64 - lowFreqOp string - lowFreqMinimumShouldMatch string - analyzer string - boost *float64 - disableCoord *bool - queryName string -} - -// NewCommonTermsQuery creates and initializes a new common terms query. 
-func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery { - return &CommonTermsQuery{name: name, text: text} -} - -func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery { - q.cutoffFreq = &f - return q -} - -func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery { - q.highFreq = &f - return q -} - -func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery { - q.highFreqOp = op - return q -} - -func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { - q.highFreqMinimumShouldMatch = minShouldMatch - return q -} - -func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery { - q.lowFreq = &f - return q -} - -func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery { - q.lowFreqOp = op - return q -} - -func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { - q.lowFreqMinimumShouldMatch = minShouldMatch - return q -} - -func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery { - q.analyzer = analyzer - return q -} - -func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery { - q.boost = &boost - return q -} - -func (q *CommonTermsQuery) DisableCoord(disableCoord bool) *CommonTermsQuery { - q.disableCoord = &disableCoord - return q -} - -func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery { - q.queryName = queryName - return q -} - -// Creates the query source for the common query. -func (q *CommonTermsQuery) Source() (interface{}, error) { - // { - // "common": { - // "body": { - // "query": "this is bonsai cool", - // "cutoff_frequency": 0.001 - // } - // } - // } - source := make(map[string]interface{}) - body := make(map[string]interface{}) - query := make(map[string]interface{}) - - source["common"] = body - body[q.name] = query - query["query"] = q.text - - if q.cutoffFreq != nil { - query["cutoff_frequency"] = *q.cutoffFreq - } - if q.highFreq != nil { - query["high_freq"] = *q.highFreq - } - if q.highFreqOp != "" { - query["high_freq_operator"] = q.highFreqOp - } - if q.lowFreq != nil { - query["low_freq"] = *q.lowFreq - } - if q.lowFreqOp != "" { - query["low_freq_operator"] = q.lowFreqOp - } - if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" { - mm := make(map[string]interface{}) - if q.lowFreqMinimumShouldMatch != "" { - mm["low_freq"] = q.lowFreqMinimumShouldMatch - } - if q.highFreqMinimumShouldMatch != "" { - mm["high_freq"] = q.highFreqMinimumShouldMatch - } - query["minimum_should_match"] = mm - } - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.disableCoord != nil { - query["disable_coord"] = *q.disableCoord - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms_test.go deleted file mode 100644 index 02c1c2b60..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_common_terms_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "encoding/json" - _ "net/http" - "testing" -) - -func TestCommonTermsQuery(t *testing.T) { - q := NewCommonTermsQuery("message", "Golang").CutoffFrequency(0.001) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"common":{"message":{"cutoff_frequency":0.001,"query":"Golang"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchQueriesCommonTermsQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Common terms query - q := NewCommonTermsQuery("message", "Golang") - searchResult, err := client.Search().Index(testIndexName).Query(q).Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 1 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 1 { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits)) - } - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score.go deleted file mode 100644 index c754d279d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ConstantScoreQuery is a query that wraps a filter and simply returns -// a constant score equal to the query boost for every document in the filter. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-constant-score-query.html -type ConstantScoreQuery struct { - filter Query - boost *float64 -} - -// ConstantScoreQuery creates and initializes a new constant score query. -func NewConstantScoreQuery(filter Query) *ConstantScoreQuery { - return &ConstantScoreQuery{ - filter: filter, - } -} - -// Boost sets the boost for this query. Documents matching this query -// will (in addition to the normal weightings) have their score multiplied -// by the boost provided. 
-func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery { - q.boost = &boost - return q -} - -// Source returns the query source. -func (q *ConstantScoreQuery) Source() (interface{}, error) { - // "constant_score" : { - // "filter" : { - // .... - // }, - // "boost" : 1.5 - // } - - query := make(map[string]interface{}) - - params := make(map[string]interface{}) - query["constant_score"] = params - - // filter - src, err := q.filter.Source() - if err != nil { - return nil, err - } - params["filter"] = src - - // boost - if q.boost != nil { - params["boost"] = *q.boost - } - - return query, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score_test.go deleted file mode 100644 index bdcce659c..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_constant_score_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestConstantScoreQuery(t *testing.T) { - q := NewConstantScoreQuery(NewTermQuery("user", "kimchy")).Boost(1.2) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"constant_score":{"boost":1.2,"filter":{"term":{"user":"kimchy"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max.go deleted file mode 100644 index c47d6bb12..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// DisMaxQuery is a query that generates the union of documents produced by -// its subqueries, and that scores each document with the maximum score -// for that document as produced by any subquery, plus a tie breaking -// increment for any additional matching subqueries. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-dis-max-query.html -type DisMaxQuery struct { - queries []Query - boost *float64 - tieBreaker *float64 - queryName string -} - -// NewDisMaxQuery creates and initializes a new dis max query. -func NewDisMaxQuery() *DisMaxQuery { - return &DisMaxQuery{ - queries: make([]Query, 0), - } -} - -// Query adds one or more queries to the dis max query. -func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery { - q.queries = append(q.queries, queries...) - return q -} - -// Boost sets the boost for this query. Documents matching this query will -// (in addition to the normal weightings) have their score multiplied by -// the boost provided. 
-func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery {
-	q.boost = &boost
-	return q
-}
-
-// TieBreaker is the factor by which the score of each non-maximum disjunct
-// for a document is multiplied with and added into the final score.
-//
-// If non-zero, the value should be small, on the order of 0.1, which says
-// that 10 occurrences of word in a lower-scored field that is also in a
-// higher scored field is just as good as a unique word in the lower scored
-// field (i.e., one that is not in any higher scored field).
-func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery {
-	q.tieBreaker = &tieBreaker
-	return q
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched filters per hit.
-func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery {
-	q.queryName = queryName
-	return q
-}
-
-// Source returns the JSON serializable content for this query.
-func (q *DisMaxQuery) Source() (interface{}, error) {
-	// {
-	//  "dis_max" : {
-	//      "tie_breaker" : 0.7,
-	//      "boost" : 1.2,
-	//      "queries" : [
-	//          {
-	//              "term" : { "age" : 34 }
-	//          },
-	//          {
-	//              "term" : { "age" : 35 }
-	//          }
-	//      ]
-	//  }
-	// }
-
-	query := make(map[string]interface{})
-	params := make(map[string]interface{})
-	query["dis_max"] = params
-
-	if q.tieBreaker != nil {
-		params["tie_breaker"] = *q.tieBreaker
-	}
-	if q.boost != nil {
-		params["boost"] = *q.boost
-	}
-	if q.queryName != "" {
-		params["_name"] = q.queryName
-	}
-
-	// queries
-	clauses := make([]interface{}, 0)
-	for _, subQuery := range q.queries {
-		src, err := subQuery.Source()
-		if err != nil {
-			return nil, err
-		}
-		clauses = append(clauses, src)
-	}
-	params["queries"] = clauses
-
-	return query, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max_test.go
deleted file mode 100644
index 8b005a61e..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_dis_max_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"testing"
-)
-
-func TestDisMaxQuery(t *testing.T) {
-	q := NewDisMaxQuery()
-	q = q.Query(NewTermQuery("age", 34), NewTermQuery("age", 35)).Boost(1.2).TieBreaker(0.7)
-	src, err := q.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"dis_max":{"boost":1.2,"queries":[{"term":{"age":34}},{"term":{"age":35}}],"tie_breaker":0.7}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists.go
deleted file mode 100644
index e117673bd..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// ExistsQuery is a query that only matches documents in which
-// the field has a value.
-//
-// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-exists-query.html
-type ExistsQuery struct {
-	name      string
-	queryName string
-}
-
-// NewExistsQuery creates and initializes a new exists query.
-func NewExistsQuery(name string) *ExistsQuery {
-	return &ExistsQuery{
-		name: name,
-	}
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched queries per hit.
-func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery {
-	q.queryName = queryName
-	return q
-}
-
-// Source returns the JSON serializable content for this query.
-func (q *ExistsQuery) Source() (interface{}, error) {
-	// {
-	//	"exists" : {
-	//		"field" : "user"
-	//	}
-	// }
-
-	query := make(map[string]interface{})
-	params := make(map[string]interface{})
-	query["exists"] = params
-
-	params["field"] = q.name
-	if q.queryName != "" {
-		params["_name"] = q.queryName
-	}
-
-	return query, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists_test.go
deleted file mode 100644
index a1112085c..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_exists_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"encoding/json"
-	"testing"
-)
-
-func TestExistsQuery(t *testing.T) {
-	q := NewExistsQuery("user")
-	src, err := q.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"exists":{"field":"user"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq.go
deleted file mode 100644
index b7fa15e67..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// FunctionScoreQuery allows you to modify the score of documents that
-// are retrieved by a query. This can be useful if, for example,
-// a score function is computationally expensive and it is sufficient
-// to compute the score on a filtered set of documents.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
-type FunctionScoreQuery struct {
-	query      Query
-	filter     Query
-	boost      *float64
-	maxBoost   *float64
-	scoreMode  string
-	boostMode  string
-	filters    []Query
-	scoreFuncs []ScoreFunction
-	minScore   *float64
-	weight     *float64
-}
-
-// NewFunctionScoreQuery creates and initializes a new function score query.
-func NewFunctionScoreQuery() *FunctionScoreQuery {
-	return &FunctionScoreQuery{
-		filters:    make([]Query, 0),
-		scoreFuncs: make([]ScoreFunction, 0),
-	}
-}
-
-// Query sets the query for the function score query.
-func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery { - q.query = query - q.filter = nil - return q -} - -// Filter sets the filter for the function score query. -func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery { - q.query = nil - q.filter = filter - return q -} - -// Add adds a score function that will execute on all the documents -// matching the filter. -func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery { - q.filters = append(q.filters, filter) - q.scoreFuncs = append(q.scoreFuncs, scoreFunc) - return q -} - -// AddScoreFunc adds a score function that will execute the function on all documents. -func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery { - q.filters = append(q.filters, nil) - q.scoreFuncs = append(q.scoreFuncs, scoreFunc) - return q -} - -// ScoreMode defines how results of individual score functions will be aggregated. -// Can be first, avg, max, sum, min, or multiply. -func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery { - q.scoreMode = scoreMode - return q -} - -// BoostMode defines how the combined result of score functions will -// influence the final score together with the sub query score. -func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery { - q.boostMode = boostMode - return q -} - -// MaxBoost is the maximum boost that will be applied by function score. -func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery { - q.maxBoost = &maxBoost - return q -} - -// Boost sets the boost for this query. Documents matching this query will -// (in addition to the normal weightings) have their score multiplied by the -// boost provided. -func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery { - q.boost = &boost - return q -} - -// MinScore sets the minimum score. -func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery { - q.minScore = &minScore - return q -} - -// Source returns JSON for the function score query. -func (q *FunctionScoreQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["function_score"] = query - - if q.query != nil { - src, err := q.query.Source() - if err != nil { - return nil, err - } - query["query"] = src - } else if q.filter != nil { - src, err := q.filter.Source() - if err != nil { - return nil, err - } - query["filter"] = src - } - - if len(q.filters) == 1 && q.filters[0] == nil { - // Weight needs to be serialized on this level. - if weight := q.scoreFuncs[0].GetWeight(); weight != nil { - query["weight"] = weight - } - // Serialize the score function - src, err := q.scoreFuncs[0].Source() - if err != nil { - return nil, err - } - query[q.scoreFuncs[0].Name()] = src - } else { - funcs := make([]interface{}, len(q.filters)) - for i, filter := range q.filters { - hsh := make(map[string]interface{}) - if filter != nil { - src, err := filter.Source() - if err != nil { - return nil, err - } - hsh["filter"] = src - } - // Weight needs to be serialized on this level. 
- if weight := q.scoreFuncs[i].GetWeight(); weight != nil { - hsh["weight"] = weight - } - // Serialize the score function - src, err := q.scoreFuncs[i].Source() - if err != nil { - return nil, err - } - hsh[q.scoreFuncs[i].Name()] = src - funcs[i] = hsh - } - query["functions"] = funcs - } - - if q.scoreMode != "" { - query["score_mode"] = q.scoreMode - } - if q.boostMode != "" { - query["boost_mode"] = q.boostMode - } - if q.maxBoost != nil { - query["max_boost"] = *q.maxBoost - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.minScore != nil { - query["min_score"] = *q.minScore - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_score_funcs.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_score_funcs.go deleted file mode 100644 index fbce3577d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_score_funcs.go +++ /dev/null @@ -1,567 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "strings" -) - -// ScoreFunction is used in combination with the Function Score Query. -type ScoreFunction interface { - Name() string - GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery - Source() (interface{}, error) -} - -// -- Exponential Decay -- - -// ExponentialDecayFunction builds an exponential decay score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html -// for details. -type ExponentialDecayFunction struct { - fieldName string - origin interface{} - scale interface{} - decay *float64 - offset interface{} - multiValueMode string - weight *float64 -} - -// NewExponentialDecayFunction creates a new ExponentialDecayFunction. -func NewExponentialDecayFunction() *ExponentialDecayFunction { - return &ExponentialDecayFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *ExponentialDecayFunction) Name() string { - return "exp" -} - -// FieldName specifies the name of the field to which this decay function is applied to. -func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction { - fn.fieldName = fieldName - return fn -} - -// Origin defines the "central point" by which the decay function calculates -// "distance". -func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction { - fn.origin = origin - return fn -} - -// Scale defines the scale to be used with Decay. -func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction { - fn.scale = scale - return fn -} - -// Decay defines how documents are scored at the distance given a Scale. -// If no decay is defined, documents at the distance Scale will be scored 0.5. -func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction { - fn.decay = &decay - return fn -} - -// Offset, if defined, computes the decay function only for a distance -// greater than the defined offset. -func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction { - fn.offset = offset - return fn -} - -// Weight adjusts the score of the score function. 
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *ExponentialDecayFunction) GetWeight() *float64 { - return fn.weight -} - -// MultiValueMode specifies how the decay function should be calculated -// on a field that has multiple values. -// Valid modes are: min, max, avg, and sum. -func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction { - fn.multiValueMode = mode - return fn -} - -// Source returns the serializable JSON data of this score function. -func (fn *ExponentialDecayFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source[fn.fieldName] = params - if fn.origin != nil { - params["origin"] = fn.origin - } - params["scale"] = fn.scale - if fn.decay != nil && *fn.decay > 0 { - params["decay"] = *fn.decay - } - if fn.offset != nil { - params["offset"] = fn.offset - } - if fn.multiValueMode != "" { - source["multi_value_mode"] = fn.multiValueMode - } - return source, nil -} - -// -- Gauss Decay -- - -// GaussDecayFunction builds a gauss decay score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html -// for details. -type GaussDecayFunction struct { - fieldName string - origin interface{} - scale interface{} - decay *float64 - offset interface{} - multiValueMode string - weight *float64 -} - -// NewGaussDecayFunction returns a new GaussDecayFunction. -func NewGaussDecayFunction() *GaussDecayFunction { - return &GaussDecayFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *GaussDecayFunction) Name() string { - return "gauss" -} - -// FieldName specifies the name of the field to which this decay function is applied to. -func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction { - fn.fieldName = fieldName - return fn -} - -// Origin defines the "central point" by which the decay function calculates -// "distance". -func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction { - fn.origin = origin - return fn -} - -// Scale defines the scale to be used with Decay. -func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction { - fn.scale = scale - return fn -} - -// Decay defines how documents are scored at the distance given a Scale. -// If no decay is defined, documents at the distance Scale will be scored 0.5. -func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction { - fn.decay = &decay - return fn -} - -// Offset, if defined, computes the decay function only for a distance -// greater than the defined offset. -func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction { - fn.offset = offset - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score -// for details. 
-func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *GaussDecayFunction) GetWeight() *float64 { - return fn.weight -} - -// MultiValueMode specifies how the decay function should be calculated -// on a field that has multiple values. -// Valid modes are: min, max, avg, and sum. -func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction { - fn.multiValueMode = mode - return fn -} - -// Source returns the serializable JSON data of this score function. -func (fn *GaussDecayFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source[fn.fieldName] = params - if fn.origin != nil { - params["origin"] = fn.origin - } - params["scale"] = fn.scale - if fn.decay != nil && *fn.decay > 0 { - params["decay"] = *fn.decay - } - if fn.offset != nil { - params["offset"] = fn.offset - } - if fn.multiValueMode != "" { - source["multi_value_mode"] = fn.multiValueMode - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} - -// -- Linear Decay -- - -// LinearDecayFunction builds a linear decay score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html -// for details. -type LinearDecayFunction struct { - fieldName string - origin interface{} - scale interface{} - decay *float64 - offset interface{} - multiValueMode string - weight *float64 -} - -// NewLinearDecayFunction initializes and returns a new LinearDecayFunction. -func NewLinearDecayFunction() *LinearDecayFunction { - return &LinearDecayFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *LinearDecayFunction) Name() string { - return "linear" -} - -// FieldName specifies the name of the field to which this decay function is applied to. -func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction { - fn.fieldName = fieldName - return fn -} - -// Origin defines the "central point" by which the decay function calculates -// "distance". -func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction { - fn.origin = origin - return fn -} - -// Scale defines the scale to be used with Decay. -func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction { - fn.scale = scale - return fn -} - -// Decay defines how documents are scored at the distance given a Scale. -// If no decay is defined, documents at the distance Scale will be scored 0.5. -func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction { - fn.decay = &decay - return fn -} - -// Offset, if defined, computes the decay function only for a distance -// greater than the defined offset. -func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction { - fn.offset = offset - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. 
-// Returns nil if weight is not specified. -func (fn *LinearDecayFunction) GetWeight() *float64 { - return fn.weight -} - -// MultiValueMode specifies how the decay function should be calculated -// on a field that has multiple values. -// Valid modes are: min, max, avg, and sum. -func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction { - fn.multiValueMode = mode - return fn -} - -// GetMultiValueMode returns how the decay function should be calculated -// on a field that has multiple values. -// Valid modes are: min, max, avg, and sum. -func (fn *LinearDecayFunction) GetMultiValueMode() string { - return fn.multiValueMode -} - -// Source returns the serializable JSON data of this score function. -func (fn *LinearDecayFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source[fn.fieldName] = params - if fn.origin != nil { - params["origin"] = fn.origin - } - params["scale"] = fn.scale - if fn.decay != nil && *fn.decay > 0 { - params["decay"] = *fn.decay - } - if fn.offset != nil { - params["offset"] = fn.offset - } - if fn.multiValueMode != "" { - source["multi_value_mode"] = fn.multiValueMode - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} - -// -- Script -- - -// ScriptFunction builds a script score function. It uses a script to -// compute or influence the score of documents that match with the inner -// query or filter. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_script_score -// for details. -type ScriptFunction struct { - script *Script - weight *float64 -} - -// NewScriptFunction initializes and returns a new ScriptFunction. -func NewScriptFunction(script *Script) *ScriptFunction { - return &ScriptFunction{ - script: script, - } -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *ScriptFunction) Name() string { - return "script_score" -} - -// Script specifies the script to be executed. -func (fn *ScriptFunction) Script(script *Script) *ScriptFunction { - fn.script = script - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *ScriptFunction) GetWeight() *float64 { - return fn.weight -} - -// Source returns the serializable JSON data of this score function. -func (fn *ScriptFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - if fn.script != nil { - src, err := fn.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} - -// -- Field value factor -- - -// FieldValueFactorFunction is a function score function that allows you -// to use a field from a document to influence the score. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_field_value_factor. 
-type FieldValueFactorFunction struct { - field string - factor *float64 - missing *float64 - weight *float64 - modifier string -} - -// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction. -func NewFieldValueFactorFunction() *FieldValueFactorFunction { - return &FieldValueFactorFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *FieldValueFactorFunction) Name() string { - return "field_value_factor" -} - -// Field is the field to be extracted from the document. -func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction { - fn.field = field - return fn -} - -// Factor is the (optional) factor to multiply the field with. If you do not -// specify a factor, the default is 1. -func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction { - fn.factor = &factor - return fn -} - -// Modifier to apply to the field value. It can be one of: none, log, log1p, -// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none. -func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction { - fn.modifier = modifier - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *FieldValueFactorFunction) GetWeight() *float64 { - return fn.weight -} - -// Missing is used if a document does not have that field. -func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction { - fn.missing = &missing - return fn -} - -// Source returns the serializable JSON data of this score function. -func (fn *FieldValueFactorFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - if fn.field != "" { - source["field"] = fn.field - } - if fn.factor != nil { - source["factor"] = *fn.factor - } - if fn.missing != nil { - source["missing"] = *fn.missing - } - if fn.modifier != "" { - source["modifier"] = strings.ToLower(fn.modifier) - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} - -// -- Weight Factor -- - -// WeightFactorFunction builds a weight factor function that multiplies -// the weight to the score. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_weight -// for details. -type WeightFactorFunction struct { - weight float64 -} - -// NewWeightFactorFunction initializes and returns a new WeightFactorFunction. -func NewWeightFactorFunction(weight float64) *WeightFactorFunction { - return &WeightFactorFunction{weight: weight} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *WeightFactorFunction) Name() string { - return "weight" -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score -// for details. 
-func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction { - fn.weight = weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *WeightFactorFunction) GetWeight() *float64 { - return &fn.weight -} - -// Source returns the serializable JSON data of this score function. -func (fn *WeightFactorFunction) Source() (interface{}, error) { - // Notice that the weight has to be serialized in FunctionScoreQuery. - return fn.weight, nil -} - -// -- Random -- - -// RandomFunction builds a random score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_random -// for details. -type RandomFunction struct { - seed interface{} - weight *float64 -} - -// NewRandomFunction initializes and returns a new RandomFunction. -func NewRandomFunction() *RandomFunction { - return &RandomFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *RandomFunction) Name() string { - return "random_score" -} - -// Seed is documented in 1.6 as a numeric value. However, in the source code -// of the Java client, it also accepts strings. So we accept both here, too. -func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction { - fn.seed = seed - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *RandomFunction) Weight(weight float64) *RandomFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *RandomFunction) GetWeight() *float64 { - return fn.weight -} - -// Source returns the serializable JSON data of this score function. -func (fn *RandomFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - if fn.seed != nil { - source["seed"] = fn.seed - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_test.go deleted file mode 100644 index 59f1cd191..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fsq_test.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestFunctionScoreQuery(t *testing.T) { - q := NewFunctionScoreQuery(). - Query(NewTermQuery("name.last", "banon")). - Add(NewTermQuery("name.last", "banon"), NewWeightFactorFunction(1.5)). - AddScoreFunc(NewWeightFactorFunction(3)). - AddScoreFunc(NewRandomFunction()). - Boost(3). - MaxBoost(10). 
- ScoreMode("avg") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"function_score":{"boost":3,"functions":[{"filter":{"term":{"name.last":"banon"}},"weight":1.5},{"weight":3},{"random_score":{}}],"max_boost":10,"query":{"term":{"name.last":"banon"}},"score_mode":"avg"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFunctionScoreQueryWithNilFilter(t *testing.T) { - q := NewFunctionScoreQuery(). - Query(NewTermQuery("tag", "wow")). - AddScoreFunc(NewRandomFunction()). - Boost(2.0). - MaxBoost(12.0). - BoostMode("multiply"). - ScoreMode("max") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"function_score":{"boost":2,"boost_mode":"multiply","max_boost":12,"query":{"term":{"tag":"wow"}},"random_score":{},"score_mode":"max"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFieldValueFactor(t *testing.T) { - q := NewFunctionScoreQuery(). - Query(NewTermQuery("name.last", "banon")). - AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income")). - Boost(2.0). - MaxBoost(12.0). - BoostMode("multiply"). - ScoreMode("max") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFieldValueFactorWithWeight(t *testing.T) { - q := NewFunctionScoreQuery(). - Query(NewTermQuery("name.last", "banon")). - AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)). - Boost(2.0). - MaxBoost(12.0). - BoostMode("multiply"). - ScoreMode("max") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max","weight":2.5}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFieldValueFactorWithMultipleScoreFuncsAndWeights(t *testing.T) { - q := NewFunctionScoreQuery(). - Query(NewTermQuery("name.last", "banon")). - AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)). - AddScoreFunc(NewScriptFunction(NewScript("_score * doc['my_numeric_field'].value")).Weight(1.25)). - AddScoreFunc(NewWeightFactorFunction(0.5)). - Boost(2.0). - MaxBoost(12.0). - BoostMode("multiply"). 
- ScoreMode("max") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":"_score * doc['my_numeric_field'].value"},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFunctionScoreQueryWithGaussScoreFunc(t *testing.T) { - q := NewFunctionScoreQuery(). - Query(NewTermQuery("name.last", "banon")). - AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33)) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"function_score":{"gauss":{"pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFunctionScoreQueryWithGaussScoreFuncAndMultiValueMode(t *testing.T) { - q := NewFunctionScoreQuery(). - Query(NewTermQuery("name.last", "banon")). - AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33).MultiValueMode("avg")) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"function_score":{"gauss":{"multi_value_mode":"avg","pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy.go deleted file mode 100644 index da79dc7e6..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// FuzzyQuery uses similarity based on Levenshtein edit distance for -// string fields, and a +/- margin on numeric and date fields. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html -type FuzzyQuery struct { - name string - value interface{} - boost *float64 - fuzziness interface{} - prefixLength *int - maxExpansions *int - transpositions *bool - rewrite string - queryName string -} - -// NewFuzzyQuery creates a new fuzzy query. -func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery { - q := &FuzzyQuery{ - name: name, - value: value, - } - return q -} - -// Boost sets the boost for this query. Documents matching this query will -// (in addition to the normal weightings) have their score multiplied by -// the boost provided. 
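A short consumer-side sketch of the FuzzyQuery being removed here, using the Fuzziness setter defined just below; per its comment, string forms such as "AUTO" are accepted alongside integers ("user" and "ki" mirror the deleted test data):

q := NewFuzzyQuery("user", "ki").Fuzziness("AUTO")
// q.Source() then marshals to:
// {"fuzzy":{"user":{"fuzziness":"AUTO","value":"ki"}}}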
-func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery { - q.boost = &boost - return q -} - -// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings -// like "auto", "0..1", "1..4" or "0.0..1.0". -func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery { - q.fuzziness = fuzziness - return q -} - -func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery { - q.prefixLength = &prefixLength - return q -} - -func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery { - q.maxExpansions = &maxExpansions - return q -} - -func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery { - q.transpositions = &transpositions - return q -} - -func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery { - q.rewrite = rewrite - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *FuzzyQuery) Source() (interface{}, error) { - // { - // "fuzzy" : { - // "user" : { - // "value" : "ki", - // "boost" : 1.0, - // "fuzziness" : 2, - // "prefix_length" : 0, - // "max_expansions" : 100 - // } - // } - - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["fuzzy"] = query - - fq := make(map[string]interface{}) - query[q.name] = fq - - fq["value"] = q.value - - if q.boost != nil { - fq["boost"] = *q.boost - } - if q.transpositions != nil { - fq["transpositions"] = *q.transpositions - } - if q.fuzziness != nil { - fq["fuzziness"] = q.fuzziness - } - if q.prefixLength != nil { - fq["prefix_length"] = *q.prefixLength - } - if q.maxExpansions != nil { - fq["max_expansions"] = *q.maxExpansions - } - if q.rewrite != "" { - fq["rewrite"] = q.rewrite - } - if q.queryName != "" { - fq["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy_test.go deleted file mode 100644 index fbbfe2f94..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_fuzzy_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestFuzzyQuery(t *testing.T) { - q := NewFuzzyQuery("user", "ki").Boost(1.5).Fuzziness(2).PrefixLength(0).MaxExpansions(100) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"fuzzy":{"user":{"boost":1.5,"fuzziness":2,"max_expansions":100,"prefix_length":0,"value":"ki"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box.go deleted file mode 100644 index 808ce82df..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. 
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// GeoBoundingBoxQuery allows to filter hits based on a point location using -// a bounding box. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-bounding-box-query.html -type GeoBoundingBoxQuery struct { - name string - top *float64 - left *float64 - bottom *float64 - right *float64 - typ string - queryName string -} - -// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery. -func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery { - return &GeoBoundingBoxQuery{ - name: name, - } -} - -func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery { - q.top = &top - q.left = &left - return q -} - -func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { - return q.TopLeft(point.Lat, point.Lon) -} - -func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery { - q.bottom = &bottom - q.right = &right - return q -} - -func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { - return q.BottomRight(point.Lat, point.Lon) -} - -func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery { - q.bottom = &bottom - q.left = &left - return q -} - -func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { - return q.BottomLeft(point.Lat, point.Lon) -} - -func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery { - q.top = &top - q.right = &right - return q -} - -func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { - return q.TopRight(point.Lat, point.Lon) -} - -// Type sets the type of executing the geo bounding box. It can be either -// memory or indexed. It defaults to memory. -func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery { - q.typ = typ - return q -} - -func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *GeoBoundingBoxQuery) Source() (interface{}, error) { - // { - // "geo_bbox" : { - // ... 
- // } - // } - - if q.top == nil { - return nil, errors.New("geo_bounding_box requires top latitude to be set") - } - if q.bottom == nil { - return nil, errors.New("geo_bounding_box requires bottom latitude to be set") - } - if q.right == nil { - return nil, errors.New("geo_bounding_box requires right longitude to be set") - } - if q.left == nil { - return nil, errors.New("geo_bounding_box requires left longitude to be set") - } - - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["geo_bbox"] = params - - box := make(map[string]interface{}) - box["top_left"] = []float64{*q.left, *q.top} - box["bottom_right"] = []float64{*q.right, *q.bottom} - params[q.name] = box - - if q.typ != "" { - params["type"] = q.typ - } - if q.queryName != "" { - params["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go deleted file mode 100644 index 6b15885ca..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestGeoBoundingBoxQueryIncomplete(t *testing.T) { - q := NewGeoBoundingBoxQuery("pin.location") - q = q.TopLeft(40.73, -74.1) - // no bottom and no right here - q = q.Type("memory") - src, err := q.Source() - if err == nil { - t.Fatal("expected error") - } - if src != nil { - t.Fatal("expected empty source") - } -} - -func TestGeoBoundingBoxQuery(t *testing.T) { - q := NewGeoBoundingBoxQuery("pin.location") - q = q.TopLeft(40.73, -74.1) - q = q.BottomRight(40.01, -71.12) - q = q.Type("memory") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]},"type":"memory"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoBoundingBoxQueryWithGeoPoint(t *testing.T) { - q := NewGeoBoundingBoxQuery("pin.location") - q = q.TopLeftFromGeoPoint(GeoPointFromLatLon(40.73, -74.1)) - q = q.BottomRightFromGeoPoint(GeoPointFromLatLon(40.01, -71.12)) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance.go deleted file mode 100644 index c1eed8521..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
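One detail of the GeoBoundingBoxQuery removed above is easy to miss: the setters take latitude first, but Source emits GeoJSON-style [lon, lat] arrays. A sketch, reusing the coordinates from the deleted test:

q := NewGeoBoundingBoxQuery("pin.location").
	TopLeft(40.73, -74.1).     // lat, lon
	BottomRight(40.01, -71.12) // lat, lon
// Source() serializes the corners as [lon, lat]:
// {"geo_bbox":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]}}}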
- -package elastic - -// GeoDistanceQuery filters documents that include only hits that exists -// within a specific distance from a geo point. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-query.html -type GeoDistanceQuery struct { - name string - distance string - lat float64 - lon float64 - geohash string - distanceType string - optimizeBbox string - queryName string -} - -// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery. -func NewGeoDistanceQuery(name string) *GeoDistanceQuery { - return &GeoDistanceQuery{name: name} -} - -func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery { - q.lat = point.Lat - q.lon = point.Lon - return q -} - -func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery { - q.lat = lat - q.lon = lon - return q -} - -func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery { - q.lat = lat - return q -} - -func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery { - q.lon = lon - return q -} - -func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery { - q.geohash = geohash - return q -} - -func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery { - q.distance = distance - return q -} - -func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery { - q.distanceType = distanceType - return q -} - -func (q *GeoDistanceQuery) OptimizeBbox(optimizeBbox string) *GeoDistanceQuery { - q.optimizeBbox = optimizeBbox - return q -} - -func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *GeoDistanceQuery) Source() (interface{}, error) { - // { - // "geo_distance" : { - // "distance" : "200km", - // "pin.location" : { - // "lat" : 40, - // "lon" : -70 - // } - // } - // } - - source := make(map[string]interface{}) - - params := make(map[string]interface{}) - - if q.geohash != "" { - params[q.name] = q.geohash - } else { - location := make(map[string]interface{}) - location["lat"] = q.lat - location["lon"] = q.lon - params[q.name] = location - } - - if q.distance != "" { - params["distance"] = q.distance - } - if q.distanceType != "" { - params["distance_type"] = q.distanceType - } - if q.optimizeBbox != "" { - params["optimize_bbox"] = q.optimizeBbox - } - if q.queryName != "" { - params["_name"] = q.queryName - } - - source["geo_distance"] = params - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance_test.go deleted file mode 100644 index f0b8ca654..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_distance_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
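In the GeoDistanceQuery Source above, a non-empty geohash takes precedence over any lat/lon pair, since the geohash branch is checked first. A sketch (coordinates and geohash borrowed from the deleted tests):

q := NewGeoDistanceQuery("pin.location").
	Point(40, -70).
	GeoHash("drm3btev3e86").
	Distance("12km")
// The lat/lon pair is ignored in favor of the geohash:
// {"geo_distance":{"distance":"12km","pin.location":"drm3btev3e86"}}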
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestGeoDistanceQuery(t *testing.T) { - q := NewGeoDistanceQuery("pin.location") - q = q.Lat(40) - q = q.Lon(-70) - q = q.Distance("200km") - q = q.DistanceType("plane") - q = q.OptimizeBbox("memory") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","optimize_bbox":"memory","pin.location":{"lat":40,"lon":-70}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoDistanceQueryWithGeoPoint(t *testing.T) { - q := NewGeoDistanceQuery("pin.location") - q = q.GeoPoint(GeoPointFromLatLon(40, -70)) - q = q.Distance("200km") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoDistanceQueryWithGeoHash(t *testing.T) { - q := NewGeoDistanceQuery("pin.location") - q = q.GeoHash("drm3btev3e86") - q = q.Distance("12km") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_distance":{"distance":"12km","pin.location":"drm3btev3e86"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon.go deleted file mode 100644 index b08d7078a..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// GeoPolygonQuery allows to include hits that only fall within a polygon of points. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-geo-polygon-query.html -type GeoPolygonQuery struct { - name string - points []*GeoPoint - queryName string -} - -// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery. -func NewGeoPolygonQuery(name string) *GeoPolygonQuery { - return &GeoPolygonQuery{ - name: name, - points: make([]*GeoPoint, 0), - } -} - -// AddPoint adds a point from latitude and longitude. -func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery { - q.points = append(q.points, GeoPointFromLatLon(lat, lon)) - return q -} - -// AddGeoPoint adds a GeoPoint. -func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery { - q.points = append(q.points, point) - return q -} - -func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. 
-func (q *GeoPolygonQuery) Source() (interface{}, error) { - // "geo_polygon" : { - // "person.location" : { - // "points" : [ - // {"lat" : 40, "lon" : -70}, - // {"lat" : 30, "lon" : -80}, - // {"lat" : 20, "lon" : -90} - // ] - // } - // } - source := make(map[string]interface{}) - - params := make(map[string]interface{}) - source["geo_polygon"] = params - - polygon := make(map[string]interface{}) - params[q.name] = polygon - - points := make([]interface{}, 0) - for _, point := range q.points { - points = append(points, point.Source()) - } - polygon["points"] = points - - if q.queryName != "" { - params["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon_test.go deleted file mode 100644 index efe89a8d4..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_geo_polygon_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestGeoPolygonQuery(t *testing.T) { - q := NewGeoPolygonQuery("person.location") - q = q.AddPoint(40, -70) - q = q.AddPoint(30, -80) - point, err := GeoPointFromString("20,-90") - if err != nil { - t.Fatalf("GeoPointFromString failed: %v", err) - } - q = q.AddGeoPoint(point) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoPolygonQueryFromGeoPoints(t *testing.T) { - q := NewGeoPolygonQuery("person.location") - q = q.AddGeoPoint(&GeoPoint{Lat: 40, Lon: -70}) - q = q.AddGeoPoint(GeoPointFromLatLon(30, -80)) - point, err := GeoPointFromString("20,-90") - if err != nil { - t.Fatalf("GeoPointFromString failed: %v", err) - } - q = q.AddGeoPoint(point) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child.go deleted file mode 100644 index a8907546b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// HasChildQuery accepts a query and the child type to run against, and results -// in parent documents that have child docs matching the query. 
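Before moving on to has_child, one caveat about the GeoPolygonQuery removed above: its Source does no client-side validation, so a polygon with fewer than three points serializes without complaint here and, as far as I know, is only rejected by Elasticsearch at query time. A sketch:

q := NewGeoPolygonQuery("person.location").
	AddPoint(40, -70).
	AddPoint(30, -80).
	AddPoint(20, -90) // keep at least three points; the builder will not complain
src, _ := q.Source() // no error even for a degenerate polygon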
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html -type HasChildQuery struct { - query Query - childType string - boost *float64 - scoreType string - minChildren *int - maxChildren *int - shortCircuitCutoff *int - queryName string - innerHit *InnerHit -} - -// NewHasChildQuery creates and initializes a new has_child query. -func NewHasChildQuery(childType string, query Query) *HasChildQuery { - return &HasChildQuery{ - query: query, - childType: childType, - } -} - -// Boost sets the boost for this query. -func (q *HasChildQuery) Boost(boost float64) *HasChildQuery { - q.boost = &boost - return q -} - -// ScoreType defines how the scores from the matching child documents -// are mapped into the parent document. -func (q *HasChildQuery) ScoreType(scoreType string) *HasChildQuery { - q.scoreType = scoreType - return q -} - -// MinChildren defines the minimum number of children that are required -// to match for the parent to be considered a match. -func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery { - q.minChildren = &minChildren - return q -} - -// MaxChildren defines the maximum number of children that are required -// to match for the parent to be considered a match. -func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery { - q.maxChildren = &maxChildren - return q -} - -// ShortCircuitCutoff configures what cut off point only to evaluate -// parent documents that contain the matching parent id terms instead -// of evaluating all parent docs. -func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery { - q.shortCircuitCutoff = &shortCircuitCutoff - return q -} - -// QueryName specifies the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery { - q.queryName = queryName - return q -} - -// InnerHit sets the inner hit definition in the scope of this query and -// reusing the defined type and query. -func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery { - q.innerHit = innerHit - return q -} - -// Source returns JSON for the function score query. 
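A consumer-side sketch of the has_child builder, ahead of its Source implementation below; the "comment" child type and the term values are hypothetical:

q := NewHasChildQuery("comment", NewTermQuery("user", "alice")).
	ScoreType("sum").
	MinChildren(2).
	MaxChildren(10)
// Expected serialization, by the same map-based Source pattern used throughout:
// {"has_child":{"max_children":10,"min_children":2,"query":{"term":{"user":"alice"}},"score_type":"sum","type":"comment"}}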
-func (q *HasChildQuery) Source() (interface{}, error) { - // { - // "has_child" : { - // "type" : "blog_tag", - // "query" : { - // "term" : { - // "tag" : "something" - // } - // } - // } - // } - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["has_child"] = query - - src, err := q.query.Source() - if err != nil { - return nil, err - } - query["query"] = src - query["type"] = q.childType - if q.boost != nil { - query["boost"] = *q.boost - } - if q.scoreType != "" { - query["score_type"] = q.scoreType - } - if q.minChildren != nil { - query["min_children"] = *q.minChildren - } - if q.maxChildren != nil { - query["max_children"] = *q.maxChildren - } - if q.shortCircuitCutoff != nil { - query["short_circuit_cutoff"] = *q.shortCircuitCutoff - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.innerHit != nil { - src, err := q.innerHit.Source() - if err != nil { - return nil, err - } - query["inner_hits"] = src - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child_test.go deleted file mode 100644 index 887b2e263..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_child_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestHasChildQuery(t *testing.T) { - q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"has_child":{"query":{"term":{"tag":"something"}},"type":"blog_tag"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestHasChildQueryWithInnerHit(t *testing.T) { - q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")) - q = q.InnerHit(NewInnerHit().Name("comments")) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"has_child":{"inner_hits":{"name":"comments"},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent.go deleted file mode 100644 index 4db1dde7e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// HasParentQuery accepts a query and a parent type. The query is executed -// in the parent document space which is specified by the parent type. -// This query returns child documents which associated parents have matched. 
-// For the rest has_parent query has the same options and works in the -// same manner as has_child query. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-has-parent-query.html -type HasParentQuery struct { - query Query - parentType string - boost *float64 - scoreType string - queryName string - innerHit *InnerHit -} - -// NewHasParentQuery creates and initializes a new has_parent query. -func NewHasParentQuery(parentType string, query Query) *HasParentQuery { - return &HasParentQuery{ - query: query, - parentType: parentType, - } -} - -// Boost sets the boost for this query. -func (q *HasParentQuery) Boost(boost float64) *HasParentQuery { - q.boost = &boost - return q -} - -// ScoreType defines how the parent score is mapped into the child documents. -func (q *HasParentQuery) ScoreType(scoreType string) *HasParentQuery { - q.scoreType = scoreType - return q -} - -// QueryName specifies the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery { - q.queryName = queryName - return q -} - -// InnerHit sets the inner hit definition in the scope of this query and -// reusing the defined type and query. -func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery { - q.innerHit = innerHit - return q -} - -// Source returns JSON for the function score query. -func (q *HasParentQuery) Source() (interface{}, error) { - // { - // "has_parent" : { - // "parent_type" : "blog", - // "query" : { - // "term" : { - // "tag" : "something" - // } - // } - // } - // } - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["has_parent"] = query - - src, err := q.query.Source() - if err != nil { - return nil, err - } - query["query"] = src - query["parent_type"] = q.parentType - if q.boost != nil { - query["boost"] = *q.boost - } - if q.scoreType != "" { - query["score_type"] = q.scoreType - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.innerHit != nil { - src, err := q.innerHit.Source() - if err != nil { - return nil, err - } - query["inner_hits"] = src - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent_test.go deleted file mode 100644 index b5daefda8..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_has_parent_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
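A small sketch of the has_parent builder removed above, extending the deleted test's data with a score_type; "score" is assumed to be a value this Elasticsearch generation accepted:

q := NewHasParentQuery("blog", NewTermQuery("tag", "something")).
	ScoreType("score")
// {"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}},"score_type":"score"}}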
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestHasParentQueryTest(t *testing.T) { - q := NewHasParentQuery("blog", NewTermQuery("tag", "something")) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids.go deleted file mode 100644 index 96f463dc6..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// IdsQuery filters documents that only have the provided ids. -// Note, this query uses the _uid field. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-ids-query.html -type IdsQuery struct { - types []string - values []string - boost *float64 - queryName string -} - -// NewIdsQuery creates and initializes a new ids query. -func NewIdsQuery(types ...string) *IdsQuery { - return &IdsQuery{ - types: types, - values: make([]string, 0), - } -} - -// Ids adds ids to the filter. -func (q *IdsQuery) Ids(ids ...string) *IdsQuery { - q.values = append(q.values, ids...) - return q -} - -// Boost sets the boost for this query. -func (q *IdsQuery) Boost(boost float64) *IdsQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter. -func (q *IdsQuery) QueryName(queryName string) *IdsQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *IdsQuery) Source() (interface{}, error) { - // { - // "ids" : { - // "type" : "my_type", - // "values" : ["1", "4", "100"] - // } - // } - - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["ids"] = query - - // type(s) - if len(q.types) == 1 { - query["type"] = q.types[0] - } else if len(q.types) > 1 { - query["types"] = q.types - } - - // values - query["values"] = q.values - - if q.boost != nil { - query["boost"] = *q.boost - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids_test.go deleted file mode 100644 index d1ff9a6b1..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_ids_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
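The IdsQuery Source above switches keys by arity: one type serializes as "type", several as "types", and none omits the key entirely. A sketch with invented type names:

q := NewIdsQuery("t1", "t2").Ids("1", "2")
// {"ids":{"types":["t1","t2"],"values":["1","2"]}}

q = NewIdsQuery().Ids("1")
// {"ids":{"values":["1"]}}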
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestIdsQuery(t *testing.T) { - q := NewIdsQuery("my_type").Ids("1", "4", "100").Boost(10.5).QueryName("my_query") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"ids":{"_name":"my_query","boost":10.5,"type":"my_type","values":["1","4","100"]}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices.go deleted file mode 100644 index 56efab3dd..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// IndicesQuery can be used when executed across multiple indices, allowing -// to have a query that executes only when executed on an index that matches -// a specific list of indices, and another query that executes when it is -// executed on an index that does not match the listed indices. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-indices-query.html -type IndicesQuery struct { - query Query - indices []string - noMatchQueryType string - noMatchQuery Query - queryName string -} - -// NewIndicesQuery creates and initializes a new indices query. -func NewIndicesQuery(query Query, indices ...string) *IndicesQuery { - return &IndicesQuery{ - query: query, - indices: indices, - } -} - -// NoMatchQuery sets the query to use when it executes on an index that -// does not match the indices provided. -func (q *IndicesQuery) NoMatchQuery(query Query) *IndicesQuery { - q.noMatchQuery = query - return q -} - -// NoMatchQueryType sets the no match query which can be either all or none. -func (q *IndicesQuery) NoMatchQueryType(typ string) *IndicesQuery { - q.noMatchQueryType = typ - return q -} - -// QueryName sets the query name for the filter. -func (q *IndicesQuery) QueryName(queryName string) *IndicesQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. 
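One behavior of the IndicesQuery worth calling out before its Source below: if both NoMatchQuery and NoMatchQueryType are set, the query object wins, because Source checks it first. A sketch reusing the deleted test's terms:

q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2").
	NoMatchQueryType("none").
	NoMatchQuery(NewTermQuery("tag", "kow"))
// "none" is silently ignored in favor of the query:
// {"indices":{"indices":["index1","index2"],"no_match_query":{"term":{"tag":"kow"}},"query":{"term":{"tag":"wow"}}}}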
-func (q *IndicesQuery) Source() (interface{}, error) { - // { - // "indices" : { - // "indices" : ["index1", "index2"], - // "query" : { - // "term" : { "tag" : "wow" } - // }, - // "no_match_query" : { - // "term" : { "tag" : "kow" } - // } - // } - // } - - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["indices"] = params - - params["indices"] = q.indices - - src, err := q.query.Source() - if err != nil { - return nil, err - } - params["query"] = src - - if q.noMatchQuery != nil { - src, err := q.noMatchQuery.Source() - if err != nil { - return nil, err - } - params["no_match_query"] = src - } else if q.noMatchQueryType != "" { - params["no_match_query"] = q.noMatchQueryType - } - if q.queryName != "" { - params["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices_test.go deleted file mode 100644 index f011b9ac7..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_indices_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestIndicesQuery(t *testing.T) { - q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2") - q = q.NoMatchQuery(NewTermQuery("tag", "kow")) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"indices":{"indices":["index1","index2"],"no_match_query":{"term":{"tag":"kow"}},"query":{"term":{"tag":"wow"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestIndicesQueryWithNoMatchQueryType(t *testing.T) { - q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2") - q = q.NoMatchQueryType("all") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"indices":{"indices":["index1","index2"],"no_match_query":"all","query":{"term":{"tag":"wow"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match.go deleted file mode 100644 index b740b0f0d..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MatchQuery is a family of queries that accepts text/numerics/dates, -// analyzes them, and constructs a query. -// -// To create a new MatchQuery, use NewMatchQuery. To create specific types -// of queries, e.g. a match_phrase query, use NewMatchPhrQuery(...).Type("phrase"), -// or use one of the shortcuts e.g. NewMatchPhraseQuery(...). 
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html -type MatchQuery struct { - name string - text interface{} - typ string // boolean, phrase, phrase_prefix - operator string // or / and - analyzer string - boost *float64 - slop *int - fuzziness string - prefixLength *int - maxExpansions *int - minimumShouldMatch string - rewrite string - fuzzyRewrite string - lenient *bool - fuzzyTranspositions *bool - zeroTermsQuery string - cutoffFrequency *float64 - queryName string -} - -// NewMatchQuery creates and initializes a new MatchQuery. -func NewMatchQuery(name string, text interface{}) *MatchQuery { - return &MatchQuery{name: name, text: text} -} - -// NewMatchPhraseQuery creates and initializes a new MatchQuery of type phrase. -func NewMatchPhraseQuery(name string, text interface{}) *MatchQuery { - return &MatchQuery{name: name, text: text, typ: "phrase"} -} - -// NewMatchPhrasePrefixQuery creates and initializes a new MatchQuery of type phrase_prefix. -func NewMatchPhrasePrefixQuery(name string, text interface{}) *MatchQuery { - return &MatchQuery{name: name, text: text, typ: "phrase_prefix"} -} - -// Type can be "boolean", "phrase", or "phrase_prefix". Defaults to "boolean". -func (q *MatchQuery) Type(typ string) *MatchQuery { - q.typ = typ - return q -} - -// Operator sets the operator to use when using a boolean query. -// Can be "AND" or "OR" (default). -func (q *MatchQuery) Operator(operator string) *MatchQuery { - q.operator = operator - return q -} - -// Analyzer explicitly sets the analyzer to use. It defaults to use explicit -// mapping config for the field, or, if not set, the default search analyzer. -func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery { - q.analyzer = analyzer - return q -} - -// Boost sets the boost to apply to this query. -func (q *MatchQuery) Boost(boost float64) *MatchQuery { - q.boost = &boost - return q -} - -// Slop sets the phrase slop if evaluated to a phrase query type. -func (q *MatchQuery) Slop(slop int) *MatchQuery { - q.slop = &slop - return q -} - -// Fuzziness sets the fuzziness when evaluated to a fuzzy query type. -// Defaults to "AUTO". -func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery { - q.fuzziness = fuzziness - return q -} - -func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery { - q.prefixLength = &prefixLength - return q -} - -// MaxExpansions is used with fuzzy or prefix type queries. It specifies -// the number of term expansions to use. It defaults to unbounded so that -// its recommended to set it to a reasonable value for faster execution. -func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery { - q.maxExpansions = &maxExpansions - return q -} - -// CutoffFrequency can be a value in [0..1] (or an absolute number >=1). -// It represents the maximum treshold of a terms document frequency to be -// considered a low frequency term. 
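Tying the fuzzy knobs above together, a sketch of a fuzzy match; the text is invented, and note that this MatchQuery struct stores fuzziness as a string, so even numeric edit distances are passed as strings:

q := NewMatchQuery("message", "quikc brwn fox").
	Fuzziness("2").
	PrefixLength(1).
	MaxExpansions(50) // bounded, per the recommendation above
// {"match":{"message":{"fuzziness":"2","max_expansions":50,"prefix_length":1,"query":"quikc brwn fox"}}}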
-func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery { - q.cutoffFrequency = &cutoff - return q -} - -func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -func (q *MatchQuery) Rewrite(rewrite string) *MatchQuery { - q.rewrite = rewrite - return q -} - -func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery { - q.fuzzyRewrite = fuzzyRewrite - return q -} - -func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery { - q.fuzzyTranspositions = &fuzzyTranspositions - return q -} - -// Lenient specifies whether format based failures will be ignored. -func (q *MatchQuery) Lenient(lenient bool) *MatchQuery { - q.lenient = &lenient - return q -} - -// ZeroTermsQuery can be "all" or "none". -func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery { - q.zeroTermsQuery = zeroTermsQuery - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *MatchQuery) QueryName(queryName string) *MatchQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *MatchQuery) Source() (interface{}, error) { - // {"match":{"name":{"query":"value","type":"boolean/phrase"}}} - source := make(map[string]interface{}) - - match := make(map[string]interface{}) - source["match"] = match - - query := make(map[string]interface{}) - match[q.name] = query - - query["query"] = q.text - - if q.typ != "" { - query["type"] = q.typ - } - if q.operator != "" { - query["operator"] = q.operator - } - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.slop != nil { - query["slop"] = *q.slop - } - if q.fuzziness != "" { - query["fuzziness"] = q.fuzziness - } - if q.prefixLength != nil { - query["prefix_length"] = *q.prefixLength - } - if q.maxExpansions != nil { - query["max_expansions"] = *q.maxExpansions - } - if q.minimumShouldMatch != "" { - query["minimum_should_match"] = q.minimumShouldMatch - } - if q.rewrite != "" { - query["rewrite"] = q.rewrite - } - if q.fuzzyRewrite != "" { - query["fuzzy_rewrite"] = q.fuzzyRewrite - } - if q.lenient != nil { - query["lenient"] = *q.lenient - } - if q.fuzzyTranspositions != nil { - query["fuzzy_transpositions"] = *q.fuzzyTranspositions - } - if q.zeroTermsQuery != "" { - query["zero_terms_query"] = q.zeroTermsQuery - } - if q.cutoffFrequency != nil { - query["cutoff_frequency"] = q.cutoffFrequency - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all.go deleted file mode 100644 index 5b5ca590e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MatchAllQuery is the most simple query, which matches all documents, -// giving them all a _score of 1.0. 
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-match-all-query.html -type MatchAllQuery struct { - boost *float64 -} - -// NewMatchAllQuery creates and initializes a new match all query. -func NewMatchAllQuery() *MatchAllQuery { - return &MatchAllQuery{} -} - -// Boost sets the boost for this query. Documents matching this query will -// (in addition to the normal weightings) have their score multiplied by the -// boost provided. -func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery { - q.boost = &boost - return q -} - -// Source returns JSON for the function score query. -func (q MatchAllQuery) Source() (interface{}, error) { - // { - // "match_all" : { ... } - // } - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["match_all"] = params - if q.boost != nil { - params["boost"] = *q.boost - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all_test.go deleted file mode 100644 index 0dcebb1f6..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_all_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMatchAllQuery(t *testing.T) { - q := NewMatchAllQuery() - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"match_all":{}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMatchAllQueryWithBoost(t *testing.T) { - q := NewMatchAllQuery().Boost(3.14) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"match_all":{"boost":3.14}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_test.go deleted file mode 100644 index ade59351f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_match_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMatchQuery(t *testing.T) { - q := NewMatchQuery("message", "this is a test") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"match":{"message":{"query":"this is a test"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMatchPhraseQuery(t *testing.T) { - q := NewMatchPhraseQuery("message", "this is a test") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"match":{"message":{"query":"this is a test","type":"phrase"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMatchPhrasePrefixQuery(t *testing.T) { - q := NewMatchPhrasePrefixQuery("message", "this is a test") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"match":{"message":{"query":"this is a test","type":"phrase_prefix"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMatchQueryWithOptions(t *testing.T) { - q := NewMatchQuery("message", "this is a test").Analyzer("whitespace").Operator("or").Boost(2.5) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"match":{"message":{"analyzer":"whitespace","boost":2.5,"operator":"or","query":"this is a test"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing.go deleted file mode 100644 index 0fff3f55c..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MissingQuery returns documents that have only null values or no value -// in the original field. -// -// For details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-missing-query.html -type MissingQuery struct { - name string - queryName string - nullValue *bool - existence *bool -} - -// NewMissingQuery creates and initializes a new MissingQuery. -func NewMissingQuery(name string) *MissingQuery { - return &MissingQuery{name: name} -} - -// QueryName sets the query name for the query that can be used when -// searching for matched filters hit. -func (q *MissingQuery) QueryName(queryName string) *MissingQuery { - q.queryName = queryName - return q -} - -// NullValue indicates whether the missing filter automatically includes -// fields with null value configured in the mappings. Defaults to false. 
-func (q *MissingQuery) NullValue(nullValue bool) *MissingQuery { - q.nullValue = &nullValue - return q -} - -// Existence indicates whether the missing filter includes documents where -// the field doesn't exist in the docs. -func (q *MissingQuery) Existence(existence bool) *MissingQuery { - q.existence = &existence - return q -} - -// Source returns JSON for the query. -func (q *MissingQuery) Source() (interface{}, error) { - // { - // "missing" : { - // "field" : "..." - // } - // } - - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["missing"] = params - params["field"] = q.name - if q.nullValue != nil { - params["null_value"] = *q.nullValue - } - if q.existence != nil { - params["existence"] = *q.existence - } - if q.queryName != "" { - params["_name"] = q.queryName - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing_test.go deleted file mode 100644 index 096b0b3cd..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_missing_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMissingQuery(t *testing.T) { - q := NewMissingQuery("user") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"missing":{"field":"user"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMissingQueryWithParams(t *testing.T) { - q := NewMissingQuery("user").NullValue(true).Existence(true).QueryName("_my_query") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"missing":{"_name":"_my_query","existence":true,"field":"user","null_value":true}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this.go deleted file mode 100644 index afce3f05c..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// MoreLikeThis query (MLT Query) finds documents that are "like" a given -// set of documents. In order to do so, MLT selects a set of representative -// terms of these input documents, forms a query using these terms, executes -// the query and returns the results. The user controls the input documents, -// how the terms should be selected and how the query is formed. 
-
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html
-type MoreLikeThisQuery struct {
- fields []string
- docs []*MoreLikeThisQueryItem
- unlikeDocs []*MoreLikeThisQueryItem
- include *bool
- minimumShouldMatch string
- minTermFreq *int
- maxQueryTerms *int
- stopWords []string
- minDocFreq *int
- maxDocFreq *int
- minWordLen *int
- maxWordLen *int
- boostTerms *float64
- boost *float64
- analyzer string
- failOnUnsupportedField *bool
- queryName string
-}
-
-// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery.
-func NewMoreLikeThisQuery() *MoreLikeThisQuery {
- return &MoreLikeThisQuery{
- fields: make([]string, 0),
- stopWords: make([]string, 0),
- docs: make([]*MoreLikeThisQueryItem, 0),
- unlikeDocs: make([]*MoreLikeThisQueryItem, 0),
- }
-}
-
-// Field adds one or more field names to the query.
-func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery {
- q.fields = append(q.fields, fields...)
- return q
-}
-
-// StopWord sets the stopwords. Any word in this set is considered
-// "uninteresting" and ignored. Even if your Analyzer allows stopwords,
-// you might want to tell the MoreLikeThis code to ignore them, as for
-// the purposes of document similarity it seems reasonable to assume that
-// "a stop word is never interesting".
-func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery {
- q.stopWords = append(q.stopWords, stopWords...)
- return q
-}
-
-// LikeText sets the text to use in order to find documents that are "like" this.
-func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery {
- for _, s := range likeTexts {
- item := NewMoreLikeThisQueryItem().LikeText(s)
- q.docs = append(q.docs, item)
- }
- return q
-}
-
-// LikeItems sets the documents to use in order to find documents that are "like" this.
-func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
- q.docs = append(q.docs, docs...)
- return q
-}
-
-// IgnoreLikeText sets the text from which the terms should not be selected.
-func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery {
- for _, s := range ignoreLikeText {
- item := NewMoreLikeThisQueryItem().LikeText(s)
- q.unlikeDocs = append(q.unlikeDocs, item)
- }
- return q
-}
-
-// IgnoreLikeItems sets the documents from which the terms should not be selected.
-func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
- q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...)
- return q
-}
-
-// Ids sets the document ids to use in order to find documents that are "like" this.
-func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery {
- for _, id := range ids {
- item := NewMoreLikeThisQueryItem().Id(id)
- q.docs = append(q.docs, item)
- }
- return q
-}
-
-// Include specifies whether the input documents should also be included
-// in the results returned. Defaults to false.
-func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery {
- q.include = &include
- return q
-}
-
-// MinimumShouldMatch sets the number of terms that must match the generated
-// query expressed in the common syntax for minimum should match.
-// The default value is "30%".
-//
-// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0.
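// A minimal sketch of how LikeText and Ids feed the same "like" list, assuming
// the Source method further below and encoding/json; string items and
// {"_id": ...} items may be mixed, and the values here are illustrative:
//
//	q := NewMoreLikeThisQuery().Field("message").LikeText("Golang topic").Ids("1")
//	src, _ := q.Source()
//	data, _ := json.Marshal(src)
//	// string(data) == `{"mlt":{"fields":["message"],"like":["Golang topic",{"_id":"1"}]}}`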
-
-func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
-// MinTermFreq is the frequency below which terms will be ignored in the
-// source doc. The default frequency is 2.
-func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery {
- q.minTermFreq = &minTermFreq
- return q
-}
-
-// MaxQueryTerms sets the maximum number of query terms that will be included
-// in any generated query. It defaults to 25.
-func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery {
- q.maxQueryTerms = &maxQueryTerms
- return q
-}
-
-// MinDocFreq sets the frequency at which words will be ignored which do
-// not occur in at least this many docs. The default is 5.
-func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery {
- q.minDocFreq = &minDocFreq
- return q
-}
-
-// MaxDocFreq sets the maximum frequency for which words may still appear.
-// Words that appear in more than this many docs will be ignored.
-// It defaults to unbounded.
-func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery {
- q.maxDocFreq = &maxDocFreq
- return q
-}
-
-// MinWordLen sets the minimum word length below which words will be
-// ignored. It defaults to 0.
-func (q *MoreLikeThisQuery) MinWordLen(minWordLen int) *MoreLikeThisQuery {
- q.minWordLen = &minWordLen
- return q
-}
-
-// MaxWordLen sets the maximum word length above which words will be ignored.
-// Defaults to unbounded (0).
-func (q *MoreLikeThisQuery) MaxWordLen(maxWordLen int) *MoreLikeThisQuery {
- q.maxWordLen = &maxWordLen
- return q
-}
-
-// BoostTerms sets the boost factor to use when boosting terms.
-// It defaults to 1.
-func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery {
- q.boostTerms = &boostTerms
- return q
-}
-
-// Analyzer specifies the analyzer that will be used to analyze the text.
-// Defaults to the analyzer associated with the field.
-func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery {
- q.analyzer = analyzer
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery {
- q.boost = &boost
- return q
-}
-
-// FailOnUnsupportedField indicates whether to fail or return no result
-// when this query is run against a field which is not supported such as
-// a binary/numeric field.
-func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery {
- q.failOnUnsupportedField = &fail
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched_filters per hit.
-func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery {
- q.queryName = queryName
- return q
-}
-
-// Source creates the source for the MLT query.
-// It may return an error if the caller forgot to specify any documents to
-// be "liked" in the MoreLikeThisQuery.
-func (q *MoreLikeThisQuery) Source() (interface{}, error) {
- // {
- // "mlt" : { ... 
} - // } - if len(q.docs) == 0 { - return nil, errors.New(`more_like_this requires some documents to be "liked"`) - } - - source := make(map[string]interface{}) - - params := make(map[string]interface{}) - source["mlt"] = params - - if len(q.fields) > 0 { - params["fields"] = q.fields - } - - likes := make([]interface{}, 0) - for _, doc := range q.docs { - src, err := doc.Source() - if err != nil { - return nil, err - } - likes = append(likes, src) - } - params["like"] = likes - - if len(q.unlikeDocs) > 0 { - dontLikes := make([]interface{}, 0) - for _, doc := range q.unlikeDocs { - src, err := doc.Source() - if err != nil { - return nil, err - } - dontLikes = append(dontLikes, src) - } - params["unlike"] = dontLikes - } - - if q.minimumShouldMatch != "" { - params["minimum_should_match"] = q.minimumShouldMatch - } - if q.minTermFreq != nil { - params["min_term_freq"] = *q.minTermFreq - } - if q.maxQueryTerms != nil { - params["max_query_terms"] = *q.maxQueryTerms - } - if len(q.stopWords) > 0 { - params["stop_words"] = q.stopWords - } - if q.minDocFreq != nil { - params["min_doc_freq"] = *q.minDocFreq - } - if q.maxDocFreq != nil { - params["max_doc_freq"] = *q.maxDocFreq - } - if q.minWordLen != nil { - params["min_word_len"] = *q.minWordLen - } - if q.maxWordLen != nil { - params["max_word_len"] = *q.maxWordLen - } - if q.boostTerms != nil { - params["boost_terms"] = *q.boostTerms - } - if q.boost != nil { - params["boost"] = *q.boost - } - if q.analyzer != "" { - params["analyzer"] = q.analyzer - } - if q.failOnUnsupportedField != nil { - params["fail_on_unsupported_field"] = *q.failOnUnsupportedField - } - if q.queryName != "" { - params["_name"] = q.queryName - } - if q.include != nil { - params["include"] = *q.include - } - - return source, nil -} - -// -- MoreLikeThisQueryItem -- - -// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery -// to be "liked" or "unliked". -type MoreLikeThisQueryItem struct { - likeText string - - index string - typ string - id string - doc interface{} - fields []string - routing string - fsc *FetchSourceContext - version int64 - versionType string -} - -// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem. -func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem { - return &MoreLikeThisQueryItem{ - version: -1, - } -} - -// LikeText represents a text to be "liked". -func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem { - item.likeText = likeText - return item -} - -// Index represents the index of the item. -func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem { - item.index = index - return item -} - -// Type represents the document type of the item. -func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem { - item.typ = typ - return item -} - -// Id represents the document id of the item. -func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem { - item.id = id - return item -} - -// Doc represents a raw document template for the item. -func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem { - item.doc = doc - return item -} - -// Fields represents the list of fields of the item. -func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem { - item.fields = append(item.fields, fields...) - return item -} - -// Routing sets the routing associated with the item. 
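// A short sketch of how a fully addressed item serializes, assuming the item
// Source method below; "idx", "comment", "2", and "r1" are illustrative, and
// the version is omitted while it stays at its -1 default:
//
//	item := NewMoreLikeThisQueryItem().Index("idx").Type("comment").Id("2").Routing("r1")
//	src, _ := item.Source()
//	// json.Marshal(src) yields `{"_id":"2","_index":"idx","_routing":"r1","_type":"comment"}`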
-func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem { - item.routing = routing - return item -} - -// FetchSourceContext represents the fetch source of the item which controls -// if and how _source should be returned. -func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem { - item.fsc = fsc - return item -} - -// Version specifies the version of the item. -func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem { - item.version = version - return item -} - -// VersionType represents the version type of the item. -func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem { - item.versionType = versionType - return item -} - -// Source returns the JSON-serializable fragment of the entity. -func (item *MoreLikeThisQueryItem) Source() (interface{}, error) { - if item.likeText != "" { - return item.likeText, nil - } - - source := make(map[string]interface{}) - - if item.index != "" { - source["_index"] = item.index - } - if item.typ != "" { - source["_type"] = item.typ - } - if item.id != "" { - source["_id"] = item.id - } - if item.doc != nil { - source["doc"] = item.doc - } - if len(item.fields) > 0 { - source["fields"] = item.fields - } - if item.routing != "" { - source["_routing"] = item.routing - } - if item.fsc != nil { - src, err := item.fsc.Source() - if err != nil { - return nil, err - } - source["_source"] = src - } - if item.version >= 0 { - source["_version"] = item.version - } - if item.versionType != "" { - source["_version_type"] = item.versionType - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this_test.go deleted file mode 100644 index 64bfe4305..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_more_like_this_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
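// A minimal sketch of the error path in MoreLikeThisQuery.Source above,
// assuming nothing has been "liked" yet:
//
//	q := NewMoreLikeThisQuery().Field("message")
//	if _, err := q.Source(); err != nil {
//		// err.Error() == `more_like_this requires some documents to be "liked"`
//	}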
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMoreLikeThisQuerySourceWithLikeText(t *testing.T) { - q := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatal(err) - } - got := string(data) - expected := `{"mlt":{"fields":["message"],"like":["Golang topic"]}}` - if got != expected { - t.Fatalf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMoreLikeThisQuerySourceWithLikeAndUnlikeItems(t *testing.T) { - q := NewMoreLikeThisQuery() - q = q.LikeItems( - NewMoreLikeThisQueryItem().Id("1"), - NewMoreLikeThisQueryItem().Index(testIndexName2).Type("comment").Id("2").Routing("routing_id"), - ) - q = q.IgnoreLikeItems(NewMoreLikeThisQueryItem().Id("3")) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatal(err) - } - got := string(data) - expected := `{"mlt":{"like":[{"_id":"1"},{"_id":"2","_index":"elastic-test2","_routing":"routing_id","_type":"comment"}],"unlike":[{"_id":"3"}]}}` - if got != expected { - t.Fatalf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMoreLikeThisQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another Golang topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Common query - mltq := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message") - res, err := client.Search(). - Index(testIndexName). - Query(mltq). - Do() - if err != nil { - t.Fatal(err) - } - if res.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match.go deleted file mode 100644 index b9f74a0d3..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "strings" -) - -// MultiMatchQuery builds on the MatchQuery to allow multi-field queries. 
-
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html
-type MultiMatchQuery struct {
- text interface{}
- fields []string
- fieldBoosts map[string]*float64
- typ string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix
- operator string // AND or OR
- analyzer string
- boost *float64
- slop *int
- fuzziness string
- prefixLength *int
- maxExpansions *int
- minimumShouldMatch string
- rewrite string
- fuzzyRewrite string
- tieBreaker *float64
- lenient *bool
- cutoffFrequency *float64
- zeroTermsQuery string
- queryName string
-}
-
-// NewMultiMatchQuery creates and initializes a new MultiMatchQuery.
-func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery {
- q := &MultiMatchQuery{
- text: text,
- fields: make([]string, 0),
- fieldBoosts: make(map[string]*float64),
- }
- q.fields = append(q.fields, fields...)
- return q
-}
-
-// Field adds a field to run the multi match against.
-func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery {
- q.fields = append(q.fields, field)
- return q
-}
-
-// FieldWithBoost adds a field to run the multi match against with a specific boost.
-func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery {
- q.fields = append(q.fields, field)
- q.fieldBoosts[field] = &boost
- return q
-}
-
-// Type can be "best_fields", "boolean", "most_fields", "cross_fields",
-// "phrase", or "phrase_prefix".
-func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery {
- var zero = float64(0.0)
- var one = float64(1.0)
-
- switch strings.ToLower(typ) {
- default: // best_fields / boolean
- q.typ = "best_fields"
- q.tieBreaker = &zero
- case "most_fields":
- q.typ = "most_fields"
- q.tieBreaker = &one
- case "cross_fields":
- q.typ = "cross_fields"
- q.tieBreaker = &zero
- case "phrase":
- q.typ = "phrase"
- q.tieBreaker = &zero
- case "phrase_prefix":
- q.typ = "phrase_prefix"
- q.tieBreaker = &zero
- }
- return q
-}
-
-// Operator sets the operator to use when using boolean query.
-// It can be either AND or OR (default).
-func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery {
- q.operator = operator
- return q
-}
-
-// Analyzer sets the analyzer to use explicitly. It defaults to use explicit
-// mapping config for the field, or, if not set, the default search analyzer.
-func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery {
- q.analyzer = analyzer
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery {
- q.boost = &boost
- return q
-}
-
-// Slop sets the phrase slop if evaluated to a phrase query type.
-func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery {
- q.slop = &slop
- return q
-}
-
-// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type.
-// It defaults to "AUTO".
-func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery {
- q.fuzziness = fuzziness
- return q
-}
-
-// PrefixLength for the fuzzy process.
-func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery {
- q.prefixLength = &prefixLength
- return q
-}
-
-// MaxExpansions is the number of term expansions to use when using fuzzy
-// or prefix type query. It defaults to unbounded so it's recommended
-// to set it to a reasonable value for faster execution.
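// A small sketch of the tie_breaker defaults picked by Type above:
// "most_fields" is the only type that defaults to 1; every other recognized
// type, and the best_fields fallback, defaults to 0:
//
//	NewMultiMatchQuery("t", "subject", "message").Type("most_fields") // "tie_breaker":1
//	NewMultiMatchQuery("t", "subject", "message").Type("phrase")      // "tie_breaker":0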
-func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery { - q.maxExpansions = &maxExpansions - return q -} - -// MinimumShouldMatch represents the minimum number of optional should clauses -// to match. -func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery { - q.rewrite = rewrite - return q -} - -func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery { - q.fuzzyRewrite = fuzzyRewrite - return q -} - -// TieBreaker for "best-match" disjunction queries (OR queries). -// The tie breaker capability allows documents that match more than one -// query clause (in this case on more than one field) to be scored better -// than documents that match only the best of the fields, without confusing -// this with the better case of two distinct matches in the multiple fields. -// -// A tie-breaker value of 1.0 is interpreted as a signal to score queries as -// "most-match" queries where all matching query clauses are considered for scoring. -func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery { - q.tieBreaker = &tieBreaker - return q -} - -// Lenient indicates whether format based failures will be ignored. -func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery { - q.lenient = &lenient - return q -} - -// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1) -// representing the maximum threshold of a terms document frequency to be -// considered a low frequency term. -func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery { - q.cutoffFrequency = &cutoff - return q -} - -// ZeroTermsQuery can be "all" or "none". -func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery { - q.zeroTermsQuery = zeroTermsQuery - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. 
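// A minimal sketch of how FieldWithBoost is rendered by the Source method
// below; boosted fields are formatted with %f, so the boost carries six
// decimal places:
//
//	q := NewMultiMatchQuery("this is a test").Field("message").FieldWithBoost("subject", 3)
//	src, _ := q.Source()
//	// json.Marshal(src) yields
//	// `{"multi_match":{"fields":["message","subject^3.000000"],"query":"this is a test"}}`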
-func (q *MultiMatchQuery) Source() (interface{}, error) { - // - // { - // "multi_match" : { - // "query" : "this is a test", - // "fields" : [ "subject", "message" ] - // } - // } - - source := make(map[string]interface{}) - - multiMatch := make(map[string]interface{}) - source["multi_match"] = multiMatch - - multiMatch["query"] = q.text - - if len(q.fields) > 0 { - fields := make([]string, 0) - for _, field := range q.fields { - if boost, found := q.fieldBoosts[field]; found { - if boost != nil { - fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) - } else { - fields = append(fields, field) - } - } else { - fields = append(fields, field) - } - } - multiMatch["fields"] = fields - } - - if q.typ != "" { - multiMatch["type"] = q.typ - } - - if q.operator != "" { - multiMatch["operator"] = q.operator - } - if q.analyzer != "" { - multiMatch["analyzer"] = q.analyzer - } - if q.boost != nil { - multiMatch["boost"] = *q.boost - } - if q.slop != nil { - multiMatch["slop"] = *q.slop - } - if q.fuzziness != "" { - multiMatch["fuzziness"] = q.fuzziness - } - if q.prefixLength != nil { - multiMatch["prefix_length"] = *q.prefixLength - } - if q.maxExpansions != nil { - multiMatch["max_expansions"] = *q.maxExpansions - } - if q.minimumShouldMatch != "" { - multiMatch["minimum_should_match"] = q.minimumShouldMatch - } - if q.rewrite != "" { - multiMatch["rewrite"] = q.rewrite - } - if q.fuzzyRewrite != "" { - multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite - } - if q.tieBreaker != nil { - multiMatch["tie_breaker"] = *q.tieBreaker - } - if q.lenient != nil { - multiMatch["lenient"] = *q.lenient - } - if q.cutoffFrequency != nil { - multiMatch["cutoff_frequency"] = *q.cutoffFrequency - } - if q.zeroTermsQuery != "" { - multiMatch["zero_terms_query"] = q.zeroTermsQuery - } - if q.queryName != "" { - multiMatch["_name"] = q.queryName - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match_test.go deleted file mode 100644 index 508726bed..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_multi_match_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestMultiMatchQuery(t *testing.T) { - q := NewMultiMatchQuery("this is a test", "subject", "message") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMultiMatchQueryBestFields(t *testing.T) { - q := NewMultiMatchQuery("this is a test", "subject", "message").Type("best_fields") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"best_fields"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMultiMatchQueryMostFields(t *testing.T) { - q := NewMultiMatchQuery("this is a test", "subject", "message").Type("most_fields") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":1,"type":"most_fields"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMultiMatchQueryCrossFields(t *testing.T) { - q := NewMultiMatchQuery("this is a test", "subject", "message").Type("cross_fields") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"cross_fields"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMultiMatchQueryPhrase(t *testing.T) { - q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMultiMatchQueryPhrasePrefix(t *testing.T) { - q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase_prefix") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase_prefix"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestMultiMatchQueryBestFieldsWithCustomTieBreaker(t *testing.T) { - q := NewMultiMatchQuery("this is a test", "subject", "message"). - Type("best_fields"). 
- TieBreaker(0.3) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0.3,"type":"best_fields"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested.go deleted file mode 100644 index 0a598f8bf..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// NestedQuery allows to query nested objects / docs. -// The query is executed against the nested objects / docs as if they were -// indexed as separate docs (they are, internally) and resulting in the -// root parent doc (or parent nested mapping). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-nested-query.html -type NestedQuery struct { - query Query - path string - scoreMode string - boost *float64 - queryName string - innerHit *InnerHit -} - -// NewNestedQuery creates and initializes a new NestedQuery. -func NewNestedQuery(path string, query Query) *NestedQuery { - return &NestedQuery{path: path, query: query} -} - -// ScoreMode specifies the score mode. -func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery { - q.scoreMode = scoreMode - return q -} - -// Boost sets the boost for this query. -func (q *NestedQuery) Boost(boost float64) *NestedQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *NestedQuery) QueryName(queryName string) *NestedQuery { - q.queryName = queryName - return q -} - -// InnerHit sets the inner hit definition in the scope of this nested query -// and reusing the defined path and query. -func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery { - q.innerHit = innerHit - return q -} - -// Source returns JSON for the query. -func (q *NestedQuery) Source() (interface{}, error) { - query := make(map[string]interface{}) - nq := make(map[string]interface{}) - query["nested"] = nq - - src, err := q.query.Source() - if err != nil { - return nil, err - } - nq["query"] = src - - nq["path"] = q.path - - if q.scoreMode != "" { - nq["score_mode"] = q.scoreMode - } - if q.boost != nil { - nq["boost"] = *q.boost - } - if q.queryName != "" { - nq["_name"] = q.queryName - } - if q.innerHit != nil { - src, err := q.innerHit.Source() - if err != nil { - return nil, err - } - nq["inner_hits"] = src - } - return query, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested_test.go deleted file mode 100644 index b068c59b1..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_nested_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestNestedQuery(t *testing.T) { - bq := NewBoolQuery() - bq = bq.Must(NewTermQuery("obj1.name", "blue")) - bq = bq.Must(NewRangeQuery("obj1.count").Gt(5)) - q := NewNestedQuery("obj1", bq).QueryName("qname") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"nested":{"_name":"qname","path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestNestedQueryWithInnerHit(t *testing.T) { - bq := NewBoolQuery() - bq = bq.Must(NewTermQuery("obj1.name", "blue")) - bq = bq.Must(NewRangeQuery("obj1.count").Gt(5)) - q := NewNestedQuery("obj1", bq) - q = q.QueryName("qname") - q = q.InnerHit(NewInnerHit().Name("comments").Query(NewTermQuery("user", "olivere"))) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"nested":{"_name":"qname","inner_hits":{"name":"comments","query":{"term":{"user":"olivere"}}},"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not.go deleted file mode 100644 index 7a1ee8e08..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// NotQuery filters out matched documents using a query. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-not-query.html -type NotQuery struct { - filter Query - queryName string -} - -// NewNotQuery creates and initializes a new NotQuery. -func NewNotQuery(filter Query) *NotQuery { - return &NotQuery{ - filter: filter, - } -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *NotQuery) QueryName(queryName string) *NotQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. 
-func (q *NotQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["not"] = params - - src, err := q.filter.Source() - if err != nil { - return nil, err - } - params["query"] = src - if q.queryName != "" { - params["_name"] = q.queryName - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not_test.go deleted file mode 100644 index 4c4f1c0ab..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_not_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestNotQuery(t *testing.T) { - f := NewNotQuery(NewTermQuery("user", "olivere")) - src, err := f.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"not":{"query":{"term":{"user":"olivere"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestNotQueryWithParams(t *testing.T) { - postDateFilter := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01") - f := NewNotQuery(postDateFilter) - f = f.QueryName("MyQueryName") - src, err := f.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"not":{"_name":"MyQueryName","query":{"range":{"postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix.go deleted file mode 100644 index 1628ba8cc..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// PrefixQuery matches documents that have fields containing terms -// with a specified prefix (not analyzed). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-prefix-query.html -type PrefixQuery struct { - name string - prefix string - boost *float64 - rewrite string - queryName string -} - -// NewPrefixQuery creates and initializes a new PrefixQuery. -func NewPrefixQuery(name string, prefix string) *PrefixQuery { - return &PrefixQuery{name: name, prefix: prefix} -} - -// Boost sets the boost for this query. -func (q *PrefixQuery) Boost(boost float64) *PrefixQuery { - q.boost = &boost - return q -} - -func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery { - q.rewrite = rewrite - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched_filters per hit. 
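// A minimal sketch of the two shapes produced by the PrefixQuery Source method
// below: the short form when no options are set, and the object form once any
// option is (the rewrite value here is only an illustration):
//
//	NewPrefixQuery("user", "ki") // {"prefix":{"user":"ki"}}
//	NewPrefixQuery("user", "ki").Boost(2).Rewrite("constant_score")
//	// {"prefix":{"user":{"boost":2,"prefix":"ki","rewrite":"constant_score"}}}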
-func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. -func (q *PrefixQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["prefix"] = query - - if q.boost == nil && q.rewrite == "" && q.queryName == "" { - query[q.name] = q.prefix - } else { - subQuery := make(map[string]interface{}) - subQuery["prefix"] = q.prefix - if q.boost != nil { - subQuery["boost"] = *q.boost - } - if q.rewrite != "" { - subQuery["rewrite"] = q.rewrite - } - if q.queryName != "" { - subQuery["_name"] = q.queryName - } - query[q.name] = subQuery - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix_test.go deleted file mode 100644 index ce1b74e41..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_prefix_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestPrefixQuery(t *testing.T) { - q := NewPrefixQuery("user", "ki") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"prefix":{"user":"ki"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPrefixQueryWithOptions(t *testing.T) { - q := NewPrefixQuery("user", "ki") - q = q.QueryName("my_query_name") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"prefix":{"user":{"_name":"my_query_name","prefix":"ki"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string.go deleted file mode 100644 index 53e4f344f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" -) - -// QueryStringQuery uses the query parser in order to parse its content. 
-
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html
-type QueryStringQuery struct {
- queryString string
- defaultField string
- defaultOperator string
- analyzer string
- quoteAnalyzer string
- quoteFieldSuffix string
- autoGeneratePhraseQueries *bool
- allowLeadingWildcard *bool
- lowercaseExpandedTerms *bool
- enablePositionIncrements *bool
- analyzeWildcard *bool
- locale string
- boost *float64
- fuzziness string
- fuzzyPrefixLength *int
- fuzzyMaxExpansions *int
- fuzzyRewrite string
- phraseSlop *int
- fields []string
- fieldBoosts map[string]*float64
- useDisMax *bool
- tieBreaker *float64
- rewrite string
- minimumShouldMatch string
- lenient *bool
- queryName string
- timeZone string
- maxDeterminizedStates *int
-}
-
-// NewQueryStringQuery creates and initializes a new QueryStringQuery.
-func NewQueryStringQuery(queryString string) *QueryStringQuery {
- return &QueryStringQuery{
- queryString: queryString,
- fields: make([]string, 0),
- fieldBoosts: make(map[string]*float64),
- }
-}
-
-// DefaultField specifies the field to run against when no prefix field
-// is specified. Only relevant when not explicitly adding fields the query
-// string will run against.
-func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery {
- q.defaultField = defaultField
- return q
-}
-
-// Field adds a field to run the query string against.
-func (q *QueryStringQuery) Field(field string) *QueryStringQuery {
- q.fields = append(q.fields, field)
- return q
-}
-
-// FieldWithBoost adds a field to run the query string against with a specific boost.
-func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery {
- q.fields = append(q.fields, field)
- q.fieldBoosts[field] = &boost
- return q
-}
-
-// UseDisMax specifies whether to combine queries using dis max or boolean
-// query when more than one field is used with the query string. Defaults
-// to dismax (true).
-func (q *QueryStringQuery) UseDisMax(useDisMax bool) *QueryStringQuery {
- q.useDisMax = &useDisMax
- return q
-}
-
-// TieBreaker is used when more than one field is used with the query string,
-// and combined queries are using dismax.
-func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery {
- q.tieBreaker = &tieBreaker
- return q
-}
-
-// DefaultOperator sets the boolean operator of the query parser used to
-// parse the query string.
-//
-// In default mode (OR) terms without any modifiers
-// are considered optional, e.g. "capital of Hungary" is equal to
-// "capital OR of OR Hungary".
-//
-// In AND mode, terms are considered to be in conjunction. The above mentioned
-// query is then parsed as "capital AND of AND Hungary".
-func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery {
- q.defaultOperator = operator
- return q
-}
-
-// Analyzer is an optional analyzer used to analyze the query string.
-// Note, if a field has search analyzer defined for it, then it will be used
-// automatically. Defaults to the smart search analyzer.
-func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery {
- q.analyzer = analyzer
- return q
-}
-
-// QuoteAnalyzer is an optional analyzer to be used to analyze the query string
-// for phrase searches. Note, if a field has search analyzer defined for it,
-// then it will be used automatically. Defaults to the smart search analyzer.
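// A minimal sketch of the field and dis_max handling described above, assuming
// the Source method further below; boosted fields use the same %f formatting
// as elsewhere in this package:
//
//	q := NewQueryStringQuery("this AND that").Field("content").
//		FieldWithBoost("subject", 2).UseDisMax(false)
//	src, _ := q.Source()
//	// json.Marshal(src) yields
//	// `{"query_string":{"fields":["content","subject^2.000000"],"query":"this AND that","use_dis_max":false}}`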
-
-func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery {
- q.quoteAnalyzer = quoteAnalyzer
- return q
-}
-
-// AutoGeneratePhraseQueries indicates whether or not phrase queries will
-// be automatically generated when the analyzer returns more than one term
-// from whitespace delimited text. Set to false if phrase queries should only
-// be generated when surrounded by double quotes.
-func (q *QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) *QueryStringQuery {
- q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries
- return q
-}
-
-// MaxDeterminizedState protects against too-difficult regular expression queries.
-func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery {
- q.maxDeterminizedStates = &maxDeterminizedStates
- return q
-}
-
-// AllowLeadingWildcard specifies whether leading wildcards should be allowed
-// or not (defaults to true).
-func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery {
- q.allowLeadingWildcard = &allowLeadingWildcard
- return q
-}
-
-// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
-// and range queries are automatically lower-cased or not. Default is true.
-func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery {
- q.lowercaseExpandedTerms = &lowercaseExpandedTerms
- return q
-}
-
-// EnablePositionIncrements indicates whether to enable position increments
-// in result query. Defaults to true.
-//
-// When set, result phrase and multi-phrase queries will be aware of position
-// increments. Useful when e.g. a StopFilter increases the position increment
-// of the token that follows an omitted token.
-func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery {
- q.enablePositionIncrements = &enablePositionIncrements
- return q
-}
-
-// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO".
-func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery {
- q.fuzziness = fuzziness
- return q
-}
-
-// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries.
-// Default is 1.
-func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery {
- q.fuzzyPrefixLength = &fuzzyPrefixLength
- return q
-}
-
-func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery {
- q.fuzzyMaxExpansions = &fuzzyMaxExpansions
- return q
-}
-
-func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery {
- q.fuzzyRewrite = fuzzyRewrite
- return q
-}
-
-// PhraseSlop sets the default slop for phrases. If zero, then exact matches
-// are required. Default value is zero.
-func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery {
- q.phraseSlop = &phraseSlop
- return q
-}
-
-// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
-func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery {
- q.analyzeWildcard = &analyzeWildcard
- return q
-}
-
-func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery {
- q.rewrite = rewrite
- return q
-}
-
-func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery { - q.boost = &boost - return q -} - -// QuoteFieldSuffix is an optional field name suffix to automatically -// try and add to the field searched when using quoted text. -func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery { - q.quoteFieldSuffix = quoteFieldSuffix - return q -} - -// Lenient indicates whether the query string parser should be lenient -// when parsing field values. It defaults to the index setting and if not -// set, defaults to false. -func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery { - q.lenient = &lenient - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched_filters per hit. -func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery { - q.queryName = queryName - return q -} - -func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery { - q.locale = locale - return q -} - -// TimeZone can be used to automatically adjust to/from fields using a -// timezone. Only used with date fields, of course. -func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery { - q.timeZone = timeZone - return q -} - -// Source returns JSON for the query. -func (q *QueryStringQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["query_string"] = query - - query["query"] = q.queryString - - if q.defaultField != "" { - query["default_field"] = q.defaultField - } - - if len(q.fields) > 0 { - fields := make([]string, 0) - for _, field := range q.fields { - if boost, found := q.fieldBoosts[field]; found { - if boost != nil { - fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) - } else { - fields = append(fields, field) - } - } else { - fields = append(fields, field) - } - } - query["fields"] = fields - } - - if q.tieBreaker != nil { - query["tie_breaker"] = *q.tieBreaker - } - if q.useDisMax != nil { - query["use_dis_max"] = *q.useDisMax - } - if q.defaultOperator != "" { - query["default_operator"] = q.defaultOperator - } - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.quoteAnalyzer != "" { - query["quote_analyzer"] = q.quoteAnalyzer - } - if q.autoGeneratePhraseQueries != nil { - query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries - } - if q.maxDeterminizedStates != nil { - query["max_determinized_states"] = *q.maxDeterminizedStates - } - if q.allowLeadingWildcard != nil { - query["allow_leading_wildcard"] = *q.allowLeadingWildcard - } - if q.lowercaseExpandedTerms != nil { - query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms - } - if q.enablePositionIncrements != nil { - query["enable_position_increments"] = *q.enablePositionIncrements - } - if q.fuzziness != "" { - query["fuzziness"] = q.fuzziness - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.fuzzyPrefixLength != nil { - query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength - } - if q.fuzzyMaxExpansions != nil { - query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions - } - if q.fuzzyRewrite != "" { - query["fuzzy_rewrite"] = q.fuzzyRewrite - } - if q.phraseSlop != nil { - query["phrase_slop"] = *q.phraseSlop - } - if q.analyzeWildcard != nil { - query["analyze_wildcard"] = *q.analyzeWildcard - } - if q.rewrite != "" { - query["rewrite"] = q.rewrite - } - if q.minimumShouldMatch != "" { - query["minimum_should_match"] = q.minimumShouldMatch - } - if q.quoteFieldSuffix != "" { - 
query["quote_field_suffix"] = q.quoteFieldSuffix - } - if q.lenient != nil { - query["lenient"] = *q.lenient - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.locale != "" { - query["locale"] = q.locale - } - if q.timeZone != "" { - query["time_zone"] = q.timeZone - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string_test.go deleted file mode 100644 index 4d766124a..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_query_string_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestQueryStringQuery(t *testing.T) { - q := NewQueryStringQuery(`this AND that OR thus`) - q = q.DefaultField("content") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"query_string":{"default_field":"content","query":"this AND that OR thus"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range.go deleted file mode 100644 index f688c25bd..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// RangeQuery matches documents with fields that have terms within a certain range. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html -type RangeQuery struct { - name string - from interface{} - to interface{} - timeZone string - includeLower bool - includeUpper bool - boost *float64 - queryName string - format string -} - -// NewRangeQuery creates and initializes a new RangeQuery. -func NewRangeQuery(name string) *RangeQuery { - return &RangeQuery{name: name, includeLower: true, includeUpper: true} -} - -// From indicates the from part of the RangeQuery. -// Use nil to indicate an unbounded from part. -func (q *RangeQuery) From(from interface{}) *RangeQuery { - q.from = from - return q -} - -// Gt indicates a greater-than value for the from part. -// Use nil to indicate an unbounded from part. -func (q *RangeQuery) Gt(from interface{}) *RangeQuery { - q.from = from - q.includeLower = false - return q -} - -// Gte indicates a greater-than-or-equal value for the from part. -// Use nil to indicate an unbounded from part. -func (q *RangeQuery) Gte(from interface{}) *RangeQuery { - q.from = from - q.includeLower = true - return q -} - -// To indicates the to part of the RangeQuery. -// Use nil to indicate an unbounded to part. -func (q *RangeQuery) To(to interface{}) *RangeQuery { - q.to = to - return q -} - -// Lt indicates a less-than value for the to part. -// Use nil to indicate an unbounded to part. 
-func (q *RangeQuery) Lt(to interface{}) *RangeQuery { - q.to = to - q.includeUpper = false - return q -} - -// Lte indicates a less-than-or-equal value for the to part. -// Use nil to indicate an unbounded to part. -func (q *RangeQuery) Lte(to interface{}) *RangeQuery { - q.to = to - q.includeUpper = true - return q -} - -// IncludeLower indicates whether the lower bound should be included or not. -// Defaults to true. -func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery { - q.includeLower = includeLower - return q -} - -// IncludeUpper indicates whether the upper bound should be included or not. -// Defaults to true. -func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery { - q.includeUpper = includeUpper - return q -} - -// Boost sets the boost for this query. -func (q *RangeQuery) Boost(boost float64) *RangeQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched_filters per hit. -func (q *RangeQuery) QueryName(queryName string) *RangeQuery { - q.queryName = queryName - return q -} - -// TimeZone is used for date fields. In that case, we can adjust the -// from/to fields using a timezone. -func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery { - q.timeZone = timeZone - return q -} - -// Format is used for date fields. In that case, we can set the format -// to be used instead of the mapper format. -func (q *RangeQuery) Format(format string) *RangeQuery { - q.format = format - return q -} - -// Source returns JSON for the query. -func (q *RangeQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - - rangeQ := make(map[string]interface{}) - source["range"] = rangeQ - - params := make(map[string]interface{}) - rangeQ[q.name] = params - - params["from"] = q.from - params["to"] = q.to - if q.timeZone != "" { - params["time_zone"] = q.timeZone - } - if q.format != "" { - params["format"] = q.format - } - params["include_lower"] = q.includeLower - params["include_upper"] = q.includeUpper - - if q.boost != nil { - rangeQ["boost"] = *q.boost - } - - if q.queryName != "" { - rangeQ["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range_test.go deleted file mode 100644 index 126bb16f2..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_range_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestRangeQuery(t *testing.T) { - q := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01") - q = q.QueryName("my_query") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"_name":"my_query","postDate":{"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRangeQueryWithTimeZone(t *testing.T) { - q := NewRangeQuery("born"). - Gte("2012-01-01"). - Lte("now"). 
- TimeZone("+1:00") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":true,"time_zone":"+1:00","to":"now"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRangeQueryWithFormat(t *testing.T) { - q := NewRangeQuery("born"). - Gte("2012/01/01"). - Lte("now"). - Format("yyyy/MM/dd") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"range":{"born":{"format":"yyyy/MM/dd","from":"2012/01/01","include_lower":true,"include_upper":true,"to":"now"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp.go deleted file mode 100644 index ecd9f7fe0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// RegexpQuery allows you to use regular expression term queries. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html -type RegexpQuery struct { - name string - regexp string - flags string - boost *float64 - rewrite string - queryName string - maxDeterminizedStates *int -} - -// NewRegexpQuery creates and initializes a new RegexpQuery. -func NewRegexpQuery(name string, regexp string) *RegexpQuery { - return &RegexpQuery{name: name, regexp: regexp} -} - -// Flags sets the regexp flags. -func (q *RegexpQuery) Flags(flags string) *RegexpQuery { - q.flags = flags - return q -} - -// MaxDeterminizedStates protects against complex regular expressions. -func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery { - q.maxDeterminizedStates = &maxDeterminizedStates - return q -} - -// Boost sets the boost for this query. -func (q *RegexpQuery) Boost(boost float64) *RegexpQuery { - q.boost = &boost - return q -} - -func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery { - q.rewrite = rewrite - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery { - q.queryName = queryName - return q -} - -// Source returns the JSON-serializable query data. 
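// A minimal sketch combining the RegexpQuery options above, assuming the
// Source method below; note that in this vendored revision QueryName is
// emitted under "name" rather than the "_name" key used by the other queries
// ("q1" is only an illustrative name):
//
//	q := NewRegexpQuery("name.first", "s.*y").MaxDeterminizedStates(20000).QueryName("q1")
//	src, _ := q.Source()
//	// json.Marshal(src) yields
//	// `{"regexp":{"name.first":{"max_determinized_states":20000,"name":"q1","value":"s.*y"}}}`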
-func (q *RegexpQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["regexp"] = query - - x := make(map[string]interface{}) - x["value"] = q.regexp - if q.flags != "" { - x["flags"] = q.flags - } - if q.maxDeterminizedStates != nil { - x["max_determinized_states"] = *q.maxDeterminizedStates - } - if q.boost != nil { - x["boost"] = *q.boost - } - if q.rewrite != "" { - x["rewrite"] = q.rewrite - } - if q.queryName != "" { - x["name"] = q.queryName - } - query[q.name] = x - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp_test.go deleted file mode 100644 index f4dc2355b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_regexp_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestRegexpQuery(t *testing.T) { - q := NewRegexpQuery("name.first", "s.*y") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"regexp":{"name.first":{"value":"s.*y"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestRegexpQueryWithOptions(t *testing.T) { - q := NewRegexpQuery("name.first", "s.*y"). - Boost(1.2). - Flags("INTERSECTION|COMPLEMENT|EMPTY"). - QueryName("my_query_name") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"regexp":{"name.first":{"boost":1.2,"flags":"INTERSECTION|COMPLEMENT|EMPTY","name":"my_query_name","value":"s.*y"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script.go deleted file mode 100644 index 3baa90574..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// ScriptQuery allows to define scripts as filters. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html -type ScriptQuery struct { - script *Script - queryName string -} - -// NewScriptQuery creates and initializes a new ScriptQuery. -func NewScriptQuery(script *Script) *ScriptQuery { - return &ScriptQuery{ - script: script, - } -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. 
-func (q *ScriptQuery) Source() (interface{}, error) {
- if q.script == nil {
- return nil, errors.New("ScriptQuery expected a script")
- }
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["script"] = params
-
- src, err := q.script.Source()
- if err != nil {
- return nil, err
- }
- params["script"] = src
-
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
- return source, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script_test.go
deleted file mode 100644
index e10510c10..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_script_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestScriptQuery(t *testing.T) {
- q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"script":{"script":"doc['num1'].value \u003e 1"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestScriptQueryWithParams(t *testing.T) {
- q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
- q = q.QueryName("MyQueryName")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"script":{"_name":"MyQueryName","script":"doc['num1'].value \u003e 1"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string.go
deleted file mode 100644
index fb0a2a9b9..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
- "strings"
-)
-
-// SimpleQueryStringQuery is a query that uses the SimpleQueryParser
-// to parse its context. Unlike the regular query_string query,
-// the simple_query_string query will never throw an exception,
-// and discards invalid parts of the query.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html
-type SimpleQueryStringQuery struct {
- queryText string
- analyzer string
- operator string
- fields []string
- fieldBoosts map[string]*float64
- minimumShouldMatch string
- flags string
- boost *float64
- lowercaseExpandedTerms *bool
- lenient *bool
- analyzeWildcard *bool
- locale string
- queryName string
-}
-
-// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery.
-func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery {
- return &SimpleQueryStringQuery{
- queryText: text,
- fields: make([]string, 0),
- fieldBoosts: make(map[string]*float64),
- }
-}
-
-// Field adds a field to run the query against.
-func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery {
- q.fields = append(q.fields, field)
- return q
-}
-
-// FieldWithBoost adds a field to run the query against with a specific boost.
-func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery {
- q.fields = append(q.fields, field)
- q.fieldBoosts[field] = &boost
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used when
-// searching for matched_filters per hit.
-func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery {
- q.queryName = queryName
- return q
-}
-
-// Analyzer specifies the analyzer to use for the query.
-func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery {
- q.analyzer = analyzer
- return q
-}
-
-// DefaultOperator specifies the default operator for the query.
-func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery {
- q.operator = defaultOperator
- return q
-}
-
-// Flags sets the flags for the query.
-func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery {
- q.flags = flags
- return q
-}
-
-// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
-// and range queries are automatically lower-cased or not. Default is true.
-func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery {
- q.lowercaseExpandedTerms = &lowercaseExpandedTerms
- return q
-}
-
-func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery {
- q.locale = locale
- return q
-}
-
-// Lenient indicates whether the query string parser should be lenient
-// when parsing field values. It defaults to the index setting and if not
-// set, defaults to false.
-func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery {
- q.lenient = &lenient
- return q
-}
-
-// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
-func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery {
- q.analyzeWildcard = &analyzeWildcard
- return q
-}
-
-func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery {
- q.minimumShouldMatch = minimumShouldMatch
- return q
-}
-
-// Source returns JSON for the query.
-func (q *SimpleQueryStringQuery) Source() (interface{}, error) { - // { - // "simple_query_string" : { - // "query" : "\"fried eggs\" +(eggplant | potato) -frittata", - // "analyzer" : "snowball", - // "fields" : ["body^5","_all"], - // "default_operator" : "and" - // } - // } - - source := make(map[string]interface{}) - - query := make(map[string]interface{}) - source["simple_query_string"] = query - - query["query"] = q.queryText - - if len(q.fields) > 0 { - fields := make([]string, 0) - for _, field := range q.fields { - if boost, found := q.fieldBoosts[field]; found { - if boost != nil { - fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) - } else { - fields = append(fields, field) - } - } else { - fields = append(fields, field) - } - } - query["fields"] = fields - } - - if q.flags != "" { - query["flags"] = q.flags - } - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.operator != "" { - query["default_operator"] = strings.ToLower(q.operator) - } - if q.lowercaseExpandedTerms != nil { - query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms - } - if q.lenient != nil { - query["lenient"] = *q.lenient - } - if q.analyzeWildcard != nil { - query["analyze_wildcard"] = *q.analyzeWildcard - } - if q.locale != "" { - query["locale"] = q.locale - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.minimumShouldMatch != "" { - query["minimum_should_match"] = q.minimumShouldMatch - } - if q.boost != nil { - query["boost"] = *q.boost - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string_test.go deleted file mode 100644 index f6be3e5bd..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_simple_query_string_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSimpleQueryStringQuery(t *testing.T) {
- q := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"simple_query_string":{"query":"\"fried eggs\" +(eggplant | potato) -frittata"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSimpleQueryStringQueryExec(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
- tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do()
- if err != nil {
- t.Fatal(err)
- }
-
- // Only the first tweet contains both "Golang" and "Elasticsearch"
- query := NewSimpleQueryStringQuery("+Golang +Elasticsearch")
- searchResult, err := client.Search().
- Index(testIndexName).
- Query(query).
- Do()
- if err != nil {
- t.Fatal(err)
- }
- if searchResult.Hits == nil {
- t.Errorf("expected SearchResult.Hits != nil; got nil")
- }
- if searchResult.Hits.TotalHits != 1 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
- }
- if len(searchResult.Hits.Hits) != 1 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
- }
-
- for _, hit := range searchResult.Hits.Hits {
- if hit.Index != testIndexName {
- t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
- }
- item := make(map[string]interface{})
- err := json.Unmarshal(*hit.Source, &item)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query.go
deleted file mode 100644
index 0611c3ea4..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TemplateQuery is a query that accepts a query template and a
-// map of key/value pairs to fill in template parameters.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
-type TemplateQuery struct {
- template string
- templateType string
- vars map[string]interface{}
-}
-
-// NewTemplateQuery creates and initializes a new TemplateQuery.
-func NewTemplateQuery(name string) *TemplateQuery {
- return &TemplateQuery{
- template: name,
- vars: make(map[string]interface{}),
- }
-}
-
-// Template specifies the name of the template.
-func (q *TemplateQuery) Template(name string) *TemplateQuery { - q.template = name - return q -} - -// TemplateType defines which kind of query we use. The values can be: -// inline, indexed, or file. If undefined, inline is used. -func (q *TemplateQuery) TemplateType(typ string) *TemplateQuery { - q.templateType = typ - return q -} - -// Var sets a single parameter pair. -func (q *TemplateQuery) Var(name string, value interface{}) *TemplateQuery { - q.vars[name] = value - return q -} - -// Vars sets parameters for the template query. -func (q *TemplateQuery) Vars(vars map[string]interface{}) *TemplateQuery { - q.vars = vars - return q -} - -// Source returns the JSON serializable content for the search. -func (q *TemplateQuery) Source() (interface{}, error) { - // { - // "template" : { - // "query" : {"match_{{template}}": {}}, - // "params" : { - // "template": "all" - // } - // } - // } - - query := make(map[string]interface{}) - - tmpl := make(map[string]interface{}) - query["template"] = tmpl - - // TODO(oe): Implementation differs from online documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html - var fieldname string - switch q.templateType { - case "file": // file - fieldname = "file" - case "indexed", "id": // indexed - fieldname = "id" - default: // inline - fieldname = "query" - } - - tmpl[fieldname] = q.template - if len(q.vars) > 0 { - tmpl["params"] = q.vars - } - - return query, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query_test.go deleted file mode 100644 index 8f21ef9f0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_template_query_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestTemplateQueryInlineTest(t *testing.T) { - q := NewTemplateQuery("\"match_{{template}}\": {}}\"").Vars(map[string]interface{}{"template": "all"}) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"template":{"params":{"template":"all"},"query":"\"match_{{template}}\": {}}\""}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestTemplateQueryIndexedTest(t *testing.T) { - q := NewTemplateQuery("indexedTemplate"). - TemplateType("id"). - Vars(map[string]interface{}{"template": "all"}) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"template":{"id":"indexedTemplate","params":{"template":"all"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestTemplateQueryFileTest(t *testing.T) { - q := NewTemplateQuery("storedTemplate"). - TemplateType("file"). 
- Vars(map[string]interface{}{"template": "all"}) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"template":{"file":"storedTemplate","params":{"template":"all"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term.go deleted file mode 100644 index c20c5c66e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TermQuery finds documents that contain the exact term specified -// in the inverted index. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-term-query.html -type TermQuery struct { - name string - value interface{} - boost *float64 - queryName string -} - -// NewTermQuery creates and initializes a new TermQuery. -func NewTermQuery(name string, value interface{}) *TermQuery { - return &TermQuery{name: name, value: value} -} - -// Boost sets the boost for this query. -func (q *TermQuery) Boost(boost float64) *TermQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *TermQuery) QueryName(queryName string) *TermQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. -func (q *TermQuery) Source() (interface{}, error) { - // {"term":{"name":"value"}} - source := make(map[string]interface{}) - tq := make(map[string]interface{}) - source["term"] = tq - - if q.boost == nil && q.queryName == "" { - tq[q.name] = q.value - } else { - subQ := make(map[string]interface{}) - subQ["value"] = q.value - if q.boost != nil { - subQ["boost"] = *q.boost - } - if q.queryName != "" { - subQ["_name"] = q.queryName - } - tq[q.name] = subQ - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term_test.go deleted file mode 100644 index 17c8c9848..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_term_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTermQuery(t *testing.T) {
- q := NewTermQuery("user", "ki")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"term":{"user":"ki"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermQueryWithOptions(t *testing.T) {
- q := NewTermQuery("user", "ki")
- q = q.Boost(2.79)
- q = q.QueryName("my_tq")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"term":{"user":{"_name":"my_tq","boost":2.79,"value":"ki"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms.go
deleted file mode 100644
index a7e158859..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TermsQuery filters documents that have fields that match any
-// of the provided terms (not analyzed).
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html
-type TermsQuery struct {
- name string
- values []interface{}
- queryName string
- boost *float64
-}
-
-// NewTermsQuery creates and initializes a new TermsQuery.
-func NewTermsQuery(name string, values ...interface{}) *TermsQuery {
- q := &TermsQuery{
- name: name,
- values: make([]interface{}, 0),
- }
- if len(values) > 0 {
- q.values = append(q.values, values...)
- }
- return q
-}
-
-// Boost sets the boost for this query.
-func (q *TermsQuery) Boost(boost float64) *TermsQuery {
- q.boost = &boost
- return q
-}
-
-// QueryName sets the query name for the filter that can be used
-// when searching for matched_filters per hit
-func (q *TermsQuery) QueryName(queryName string) *TermsQuery {
- q.queryName = queryName
- return q
-}
-
-// Source creates the query source for the terms query.
-func (q *TermsQuery) Source() (interface{}, error) {
- // {"terms":{"name":["value1","value2"]}}
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["terms"] = params
- params[q.name] = q.values
- if q.boost != nil {
- params["boost"] = *q.boost
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
- return source, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms_test.go
deleted file mode 100644
index 6de743d14..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_terms_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTermsQuery(t *testing.T) {
- q := NewTermsQuery("user", "ki")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"user":["ki"]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestTermsQueryWithOptions(t *testing.T) {
- q := NewTermsQuery("user", "ki", "ko")
- q = q.Boost(2.79)
- q = q.QueryName("my_tq")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"terms":{"_name":"my_tq","boost":2.79,"user":["ki","ko"]}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type.go
deleted file mode 100644
index 884d4ae7b..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// TypeQuery filters documents matching the provided document / mapping type.
-//
-// For details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html
-type TypeQuery struct {
- typ string
-}
-
-func NewTypeQuery(typ string) *TypeQuery {
- return &TypeQuery{typ: typ}
-}
-
-// Source returns JSON for the query.
-func (q *TypeQuery) Source() (interface{}, error) {
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["type"] = params
- params["value"] = q.typ
- return source, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type_test.go
deleted file mode 100644
index bde0ed3d3..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_type_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestTypeQuery(t *testing.T) {
- q := NewTypeQuery("my_type")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"type":{"value":"my_type"}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard.go
deleted file mode 100644
index 127332da3..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// WildcardQuery matches documents that have fields matching a wildcard -// expression (not analyzed). Supported wildcards are *, which matches -// any character sequence (including the empty one), and ?, which matches -// any single character. Note this query can be slow, as it needs to iterate -// over many terms. In order to prevent extremely slow wildcard queries, -// a wildcard term should not start with one of the wildcards * or ?. -// The wildcard query maps to Lucene WildcardQuery. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html -type WildcardQuery struct { - name string - wildcard string - boost *float64 - rewrite string - queryName string -} - -// NewWildcardQuery creates and initializes a new WildcardQuery. -func NewWildcardQuery(name, wildcard string) *WildcardQuery { - return &WildcardQuery{ - name: name, - wildcard: wildcard, - } -} - -// Boost sets the boost for this query. -func (q *WildcardQuery) Boost(boost float64) *WildcardQuery { - q.boost = &boost - return q -} - -func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery { - q.rewrite = rewrite - return q -} - -// QueryName sets the name of this query. -func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery { - q.queryName = queryName - return q -} - -// Source returns the JSON serializable body of this query. -func (q *WildcardQuery) Source() (interface{}, error) { - // { - // "wildcard" : { - // "user" : { - // "wildcard" : "ki*y", - // "boost" : 1.0 - // } - // } - - source := make(map[string]interface{}) - - query := make(map[string]interface{}) - source["wildcard"] = query - - wq := make(map[string]interface{}) - query[q.name] = wq - - wq["wildcard"] = q.wildcard - - if q.boost != nil { - wq["boost"] = *q.boost - } - if q.rewrite != "" { - wq["rewrite"] = q.rewrite - } - if q.queryName != "" { - wq["_name"] = q.queryName - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard_test.go deleted file mode 100644 index 5cd529aff..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_queries_wildcard_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic_test - -import ( - "encoding/json" - "testing" - - "gopkg.in/olivere/elastic.v3" -) - -func ExampleWildcardQuery() { - // Get a client to the local Elasticsearch instance. - client, err := elastic.NewClient() - if err != nil { - // Handle error - panic(err) - } - - // Define wildcard query - q := elastic.NewWildcardQuery("user", "oli*er?").Boost(1.2) - searchResult, err := client.Search(). - Index("twitter"). // search in index "twitter" - Query(q). 
// use wildcard query defined above
- Do() // execute
- if err != nil {
- // Handle error
- panic(err)
- }
- _ = searchResult
-}
-
-func TestWildcardQuery(t *testing.T) {
- q := elastic.NewWildcardQuery("user", "ki*y??")
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"wildcard":{"user":{"wildcard":"ki*y??"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestWildcardQueryWithBoost(t *testing.T) {
- q := elastic.NewWildcardQuery("user", "ki*y??").Boost(1.2)
- src, err := q.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"wildcard":{"user":{"boost":1.2,"wildcard":"ki*y??"}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_request.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_request.go
deleted file mode 100644
index 5fb476dd1..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_request.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "strings"
-)
-
-// SearchRequest combines a search request and its
-// query details (see SearchSource).
-// It is used in combination with MultiSearch.
-type SearchRequest struct {
- searchType string // default in ES is "query_then_fetch"
- indices []string
- types []string
- routing *string
- preference *string
- source interface{}
-}
-
-// NewSearchRequest creates a new search request.
-func NewSearchRequest() *SearchRequest {
- return &SearchRequest{
- indices: make([]string, 0),
- types: make([]string, 0),
- }
-}
-
-// SearchType must be one of "query_then_fetch", "query_and_fetch",
-// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch".
-// Use one of the constants defined via SearchType.
-func (r *SearchRequest) SearchType(searchType string) *SearchRequest {
- r.searchType = searchType
- return r
-}
-
-func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest {
- return r.SearchType("dfs_query_then_fetch")
-}
-
-func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest {
- return r.SearchType("dfs_query_and_fetch")
-}
-
-func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest {
- return r.SearchType("query_then_fetch")
-}
-
-func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest {
- return r.SearchType("query_and_fetch")
-}
-
-func (r *SearchRequest) SearchTypeScan() *SearchRequest {
- return r.SearchType("scan")
-}
-
-func (r *SearchRequest) SearchTypeCount() *SearchRequest {
- return r.SearchType("count")
-}
-
-func (r *SearchRequest) Index(indices ...string) *SearchRequest {
- r.indices = append(r.indices, indices...)
- return r
-}
-
-func (r *SearchRequest) HasIndices() bool {
- return len(r.indices) > 0
-}
-
-func (r *SearchRequest) Type(types ...string) *SearchRequest {
- r.types = append(r.types, types...)
- return r
-}
-
-func (r *SearchRequest) Routing(routing string) *SearchRequest {
- r.routing = &routing
- return r
-}
-
-func (r *SearchRequest) Routings(routings ...string) *SearchRequest {
- if routings != nil {
- routings := strings.Join(routings, ",")
- r.routing = &routings
- } else {
- r.routing = nil
- }
- return r
-}
-
-func (r *SearchRequest) Preference(preference string) *SearchRequest {
- r.preference = &preference
- return r
-}
-
-func (r *SearchRequest) Source(source interface{}) *SearchRequest {
- switch v := source.(type) {
- case *SearchSource:
- src, err := v.Source()
- if err != nil {
- // Do not do anything in case of an error
- return r
- }
- r.source = src
- default:
- r.source = source
- }
- return r
-}
-
-// header is used by MultiSearch to get information about the search header
-// of one SearchRequest.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
-func (r *SearchRequest) header() interface{} {
- h := make(map[string]interface{})
- if r.searchType != "" {
- h["search_type"] = r.searchType
- }
-
- switch len(r.indices) {
- case 0:
- case 1:
- h["index"] = r.indices[0]
- default:
- h["indices"] = r.indices
- }
-
- switch len(r.types) {
- case 0:
- case 1:
- h["type"] = r.types[0]
- default:
- h["types"] = r.types
- }
-
- if r.routing != nil && *r.routing != "" {
- h["routing"] = *r.routing
- }
-
- if r.preference != nil && *r.preference != "" {
- h["preference"] = *r.preference
- }
-
- return h
-}
-
-// body is used by MultiSearch to get information about the search body
-// of one SearchRequest.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
-func (r *SearchRequest) body() interface{} {
- return r.source
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_request_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_request_test.go
deleted file mode 100644
index c672b0705..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_request_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- _ "net/http"
- "testing"
-)
-
-func TestSearchRequestIndex(t *testing.T) {
- builder := NewSearchRequest().Index("test")
- data, err := json.Marshal(builder.header())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"index":"test"}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchRequestIndices(t *testing.T) {
- builder := NewSearchRequest().Index("test", "test2")
- data, err := json.Marshal(builder.header())
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"indices":["test","test2"]}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchRequestHasIndices(t *testing.T) {
- builder := NewSearchRequest()
- if builder.HasIndices() {
- t.Errorf("expected HasIndices to return false; got %v", builder.HasIndices())
- }
- builder = builder.Index("test", "test2")
- if !builder.HasIndices() {
- t.Errorf("expected HasIndices to return true; got %v", builder.HasIndices())
- }
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_source.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_source.go
deleted file mode 100644
index 59c9fec67..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_source.go
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "fmt"
-)
-
-// SearchSource enables users to build the search source.
-// It resembles the SearchSourceBuilder in Elasticsearch.
-type SearchSource struct {
- query Query
- postQuery Query
- from int
- size int
- explain *bool
- version *bool
- sorts []SortInfo
- sorters []Sorter
- trackScores bool
- minScore *float64
- timeout string
- terminateAfter *int
- fieldNames []string
- fieldDataFields []string
- scriptFields []*ScriptField
- fetchSourceContext *FetchSourceContext
- aggregations map[string]Aggregation
- highlight *Highlight
- globalSuggestText string
- suggesters []Suggester
- rescores []*Rescore
- defaultRescoreWindowSize *int
- indexBoosts map[string]float64
- stats []string
- innerHits map[string]*InnerHit
-}
-
-// NewSearchSource initializes a new SearchSource.
-func NewSearchSource() *SearchSource {
- return &SearchSource{
- from: -1,
- size: -1,
- trackScores: false,
- sorts: make([]SortInfo, 0),
- sorters: make([]Sorter, 0),
- fieldDataFields: make([]string, 0),
- scriptFields: make([]*ScriptField, 0),
- aggregations: make(map[string]Aggregation),
- rescores: make([]*Rescore, 0),
- indexBoosts: make(map[string]float64),
- stats: make([]string, 0),
- innerHits: make(map[string]*InnerHit),
- }
-}
-
-// Query sets the query to use with this search source.
-func (s *SearchSource) Query(query Query) *SearchSource {
- s.query = query
- return s
-}
-
-// PostFilter will be executed after the query has been executed and
-// only affects the search hits, not the aggregations.
-// This filter is always executed as the last filtering mechanism.
-func (s *SearchSource) PostFilter(postFilter Query) *SearchSource {
- s.postQuery = postFilter
- return s
-}
-
-// From index to start the search from. Defaults to 0.
-func (s *SearchSource) From(from int) *SearchSource {
- s.from = from
- return s
-}
-
-// Size is the number of search hits to return. Defaults to 10.
-func (s *SearchSource) Size(size int) *SearchSource {
- s.size = size
- return s
-}
-
-// MinScore sets the minimum score below which docs will be filtered out.
-func (s *SearchSource) MinScore(minScore float64) *SearchSource {
- s.minScore = &minScore
- return s
-}
-
-// Explain indicates whether each search hit should be returned with
-// an explanation of the hit (ranking).
-func (s *SearchSource) Explain(explain bool) *SearchSource {
- s.explain = &explain
- return s
-}
-
-// Version indicates whether each search hit should be returned with
-// a version associated to it.
-func (s *SearchSource) Version(version bool) *SearchSource {
- s.version = &version
- return s
-}
-
-// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms".
-func (s *SearchSource) Timeout(timeout string) *SearchSource {
- s.timeout = timeout
- return s
-}
-
-// TimeoutInMillis controls how many milliseconds a search is allowed
-// to take before it is canceled.
-func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
- s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
- return s
-}
-
-// TerminateAfter allows the request to stop after the given number
-// of search hits are collected.
-func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource {
- s.terminateAfter = &terminateAfter
- return s
-}
-
-// Sort adds a sort order.
-func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
- s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending})
- return s
-}
-
-// SortWithInfo adds a sort order.
-func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
- s.sorts = append(s.sorts, info)
- return s
-}
-
-// SortBy adds a sort order.
-func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
- s.sorters = append(s.sorters, sorter...)
- return s
-}
-
-func (s *SearchSource) hasSort() bool {
- return len(s.sorts) > 0 || len(s.sorters) > 0
-}
-
-// TrackScores is applied when sorting and controls if scores will be
-// tracked as well. Defaults to false.
-func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
- s.trackScores = trackScores
- return s
-}
-
-// Aggregation adds an aggregation to perform as part of the search.
-func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
- s.aggregations[name] = aggregation
- return s
-}
-
-// DefaultRescoreWindowSize sets the rescore window size for rescores
-// that don't specify their window.
-func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
- s.defaultRescoreWindowSize = &defaultRescoreWindowSize
- return s
-}
-
-// Highlight adds highlighting to the search.
-func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
- s.highlight = highlight
- return s
-}
-
-// Highlighter returns the highlighter.
-func (s *SearchSource) Highlighter() *Highlight {
- if s.highlight == nil {
- s.highlight = NewHighlight()
- }
- return s.highlight
-}
-
-// GlobalSuggestText defines the global text to use with all suggesters.
-// This avoids repetition.
-func (s *SearchSource) GlobalSuggestText(text string) *SearchSource {
- s.globalSuggestText = text
- return s
-}
-
-// Suggester adds a suggester to the search.
-func (s *SearchSource) Suggester(suggester Suggester) *SearchSource { - s.suggesters = append(s.suggesters, suggester) - return s -} - -// Rescorer adds a rescorer to the search. -func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource { - s.rescores = append(s.rescores, rescore) - return s -} - -// ClearRescorers removes all rescorers from the search. -func (s *SearchSource) ClearRescorers() *SearchSource { - s.rescores = make([]*Rescore, 0) - return s -} - -// FetchSource indicates whether the response should contain the stored -// _source for every hit. -func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource { - if s.fetchSourceContext == nil { - s.fetchSourceContext = NewFetchSourceContext(fetchSource) - } else { - s.fetchSourceContext.SetFetchSource(fetchSource) - } - return s -} - -// FetchSourceContext indicates how the _source should be fetched. -func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource { - s.fetchSourceContext = fetchSourceContext - return s -} - -// NoFields indicates that no fields should be loaded, resulting in only -// id and type to be returned per field. -func (s *SearchSource) NoFields() *SearchSource { - s.fieldNames = make([]string, 0) - return s -} - -// Field adds a single field to load and return (note, must be stored) as -// part of the search request. If none are specified, the source of the -// document will be returned. -func (s *SearchSource) Field(fieldName string) *SearchSource { - if s.fieldNames == nil { - s.fieldNames = make([]string, 0) - } - s.fieldNames = append(s.fieldNames, fieldName) - return s -} - -// Fields sets the fields to load and return as part of the search request. -// If none are specified, the source of the document will be returned. -func (s *SearchSource) Fields(fieldNames ...string) *SearchSource { - if s.fieldNames == nil { - s.fieldNames = make([]string, 0) - } - s.fieldNames = append(s.fieldNames, fieldNames...) - return s -} - -// FieldDataField adds a single field to load from the field data cache -// and return as part of the search request. -func (s *SearchSource) FieldDataField(fieldDataField string) *SearchSource { - s.fieldDataFields = append(s.fieldDataFields, fieldDataField) - return s -} - -// FieldDataFields adds one or more fields to load from the field data cache -// and return as part of the search request. -func (s *SearchSource) FieldDataFields(fieldDataFields ...string) *SearchSource { - s.fieldDataFields = append(s.fieldDataFields, fieldDataFields...) - return s -} - -// ScriptField adds a single script field with the provided script. -func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource { - s.scriptFields = append(s.scriptFields, scriptField) - return s -} - -// ScriptFields adds one or more script fields with the provided scripts. -func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource { - s.scriptFields = append(s.scriptFields, scriptFields...) - return s -} - -// IndexBoost sets the boost that a specific index will receive when the -// query is executed against it. -func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource { - s.indexBoosts[index] = boost - return s -} - -// Stats group this request will be aggregated under. -func (s *SearchSource) Stats(statsGroup ...string) *SearchSource { - s.stats = append(s.stats, statsGroup...) - return s -} - -// InnerHit adds an inner hit to return with the result. 
-func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource { - s.innerHits[name] = innerHit - return s -} - -// Source returns the serializable JSON for the source builder. -func (s *SearchSource) Source() (interface{}, error) { - source := make(map[string]interface{}) - - if s.from != -1 { - source["from"] = s.from - } - if s.size != -1 { - source["size"] = s.size - } - if s.timeout != "" { - source["timeout"] = s.timeout - } - if s.terminateAfter != nil { - source["terminate_after"] = *s.terminateAfter - } - if s.query != nil { - src, err := s.query.Source() - if err != nil { - return nil, err - } - source["query"] = src - } - if s.postQuery != nil { - src, err := s.postQuery.Source() - if err != nil { - return nil, err - } - source["post_filter"] = src - } - if s.minScore != nil { - source["min_score"] = *s.minScore - } - if s.version != nil { - source["version"] = *s.version - } - if s.explain != nil { - source["explain"] = *s.explain - } - if s.fetchSourceContext != nil { - src, err := s.fetchSourceContext.Source() - if err != nil { - return nil, err - } - source["_source"] = src - } - - if s.fieldNames != nil { - switch len(s.fieldNames) { - case 1: - source["fields"] = s.fieldNames[0] - default: - source["fields"] = s.fieldNames - } - } - - if len(s.fieldDataFields) > 0 { - source["fielddata_fields"] = s.fieldDataFields - } - - if len(s.scriptFields) > 0 { - sfmap := make(map[string]interface{}) - for _, scriptField := range s.scriptFields { - src, err := scriptField.Source() - if err != nil { - return nil, err - } - sfmap[scriptField.FieldName] = src - } - source["script_fields"] = sfmap - } - - if len(s.sorters) > 0 { - sortarr := make([]interface{}, 0) - for _, sorter := range s.sorters { - src, err := sorter.Source() - if err != nil { - return nil, err - } - sortarr = append(sortarr, src) - } - source["sort"] = sortarr - } else if len(s.sorts) > 0 { - sortarr := make([]interface{}, 0) - for _, sort := range s.sorts { - src, err := sort.Source() - if err != nil { - return nil, err - } - sortarr = append(sortarr, src) - } - source["sort"] = sortarr - } - - if s.trackScores { - source["track_scores"] = s.trackScores - } - - if len(s.indexBoosts) > 0 { - source["indices_boost"] = s.indexBoosts - } - - if len(s.aggregations) > 0 { - aggsMap := make(map[string]interface{}) - for name, aggregate := range s.aggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - source["aggregations"] = aggsMap - } - - if s.highlight != nil { - src, err := s.highlight.Source() - if err != nil { - return nil, err - } - source["highlight"] = src - } - - if len(s.suggesters) > 0 { - suggesters := make(map[string]interface{}) - for _, s := range s.suggesters { - src, err := s.Source(false) - if err != nil { - return nil, err - } - suggesters[s.Name()] = src - } - if s.globalSuggestText != "" { - suggesters["text"] = s.globalSuggestText - } - source["suggest"] = suggesters - } - - if len(s.rescores) > 0 { - // Strip empty rescores from request - rescores := make([]*Rescore, 0) - for _, r := range s.rescores { - if !r.IsEmpty() { - rescores = append(rescores, r) - } - } - - if len(rescores) == 1 { - rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize - src, err := rescores[0].Source() - if err != nil { - return nil, err - } - source["rescore"] = src - } else { - slice := make([]interface{}, 0) - for _, r := range rescores { - r.defaultRescoreWindowSize = s.defaultRescoreWindowSize - src, err := r.Source() - if err 
!= nil {
- return nil, err
- }
- slice = append(slice, src)
- }
- source["rescore"] = slice
- }
- }
-
- if len(s.stats) > 0 {
- source["stats"] = s.stats
- }
-
- if len(s.innerHits) > 0 {
- // Top-level inner hits
- // See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
- // "inner_hits": {
- // "<inner_hits_name>": {
- // "<path|type>": {
- // "<paths|types>": {
- // <inner_hits_body>,
- // [,"inner_hits" : { [<sub_inner_hits>]+ } ]?
- // }
- // }
- // },
- // [,"<inner_hits_name2>" : { ... } ]*
- // }
- m := make(map[string]interface{})
- for name, hit := range s.innerHits {
- if hit.path != "" {
- src, err := hit.Source()
- if err != nil {
- return nil, err
- }
- path := make(map[string]interface{})
- path[hit.path] = src
- m[name] = map[string]interface{}{
- "path": path,
- }
- } else if hit.typ != "" {
- src, err := hit.Source()
- if err != nil {
- return nil, err
- }
- typ := make(map[string]interface{})
- typ[hit.typ] = src
- m[name] = map[string]interface{}{
- "type": typ,
- }
- } else {
- // TODO the Java client throws here, because either path or typ must be specified
- }
- }
- source["inner_hits"] = m
- }
-
- return source, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_source_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_source_test.go
deleted file mode 100644
index b5ddf61af..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/search_source_test.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "testing"
-)
-
-func TestSearchSourceMatchAllQuery(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceNoFields(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).NoFields()
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fields":[],"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceFields(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).Fields("message", "tags")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fields":["message","tags"],"query":{"match_all":{}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestSearchSourceFetchSourceDisabled(t *testing.T) {
- matchAllQ := NewMatchAllQuery()
- builder := NewSearchSource().Query(matchAllQ).FetchSource(false)
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := 
`{"_source":false,"query":{"match_all":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchSourceFetchSourceByWildcards(t *testing.T) { - matchAllQ := NewMatchAllQuery() - fsc := NewFetchSourceContext(true).Include("obj1.*", "obj2.*").Exclude("*.description") - builder := NewSearchSource().Query(matchAllQ).FetchSourceContext(fsc) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"_source":{"excludes":["*.description"],"includes":["obj1.*","obj2.*"]},"query":{"match_all":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchSourceFieldDataFields(t *testing.T) { - matchAllQ := NewMatchAllQuery() - builder := NewSearchSource().Query(matchAllQ).FieldDataFields("test1", "test2") - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"fielddata_fields":["test1","test2"],"query":{"match_all":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchSourceScriptFields(t *testing.T) { - matchAllQ := NewMatchAllQuery() - sf1 := NewScriptField("test1", NewScript("doc['my_field_name'].value * 2")) - sf2 := NewScriptField("test2", NewScript("doc['my_field_name'].value * factor").Param("factor", 3.1415927)) - builder := NewSearchSource().Query(matchAllQ).ScriptFields(sf1, sf2) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":"doc['my_field_name'].value * 2"},"test2":{"script":{"inline":"doc['my_field_name'].value * factor","params":{"factor":3.1415927}}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchSourcePostFilter(t *testing.T) { - matchAllQ := NewMatchAllQuery() - pf := NewTermQuery("tag", "important") - builder := NewSearchSource().Query(matchAllQ).PostFilter(pf) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"post_filter":{"term":{"tag":"important"}},"query":{"match_all":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchSourceHighlight(t *testing.T) { - matchAllQ := NewMatchAllQuery() - hl := NewHighlight().Field("content") - builder := NewSearchSource().Query(matchAllQ).Highlight(hl) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"highlight":{"fields":{"content":{}}},"query":{"match_all":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchSourceRescoring(t *testing.T) { - matchAllQ := NewMatchAllQuery() - rescorerQuery := NewMatchQuery("field1", "the quick brown fox").Type("phrase").Slop(2) - rescorer := NewQueryRescorer(rescorerQuery) - rescorer = rescorer.QueryWeight(0.7) - rescorer = rescorer.RescoreQueryWeight(1.2) - rescore := 
NewRescore().WindowSize(50).Rescorer(rescorer) - builder := NewSearchSource().Query(matchAllQ).Rescorer(rescore) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"query":{"match_all":{}},"rescore":{"query":{"query_weight":0.7,"rescore_query":{"match":{"field1":{"query":"the quick brown fox","slop":2,"type":"phrase"}}},"rescore_query_weight":1.2},"window_size":50}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchSourceIndexBoost(t *testing.T) { - matchAllQ := NewMatchAllQuery() - builder := NewSearchSource().Query(matchAllQ).IndexBoost("index1", 1.4).IndexBoost("index2", 1.3) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"indices_boost":{"index1":1.4,"index2":1.3},"query":{"match_all":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSearchSourceInnerHits(t *testing.T) { - matchAllQ := NewMatchAllQuery() - builder := NewSearchSource().Query(matchAllQ). - InnerHit("comments", NewInnerHit().Type("comment").Query(NewMatchQuery("user", "olivere"))). - InnerHit("views", NewInnerHit().Path("view")) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"inner_hits":{"comments":{"type":{"comment":{"query":{"match":{"user":{"query":"olivere"}}}}}},"views":{"path":{"view":{}}}},"query":{"match_all":{}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_suggester_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_suggester_test.go deleted file mode 100644 index 02c552af2..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_suggester_test.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - _ "encoding/json" - _ "net/http" - "testing" -) - -func TestTermSuggester(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - all := NewMatchAllQuery() - - tsName := "my-suggestions" - ts := NewTermSuggester(tsName) - ts = ts.Text("Goolang") - ts = ts.Field("message") - - searchResult, err := client.Search(). 
- Index(testIndexName). - Query(all). - Suggester(ts). - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Suggest == nil { - t.Errorf("expected SearchResult.Suggest != nil; got nil") - } - mySuggestions, found := searchResult.Suggest[tsName] - if !found { - t.Errorf("expected to find SearchResult.Suggest[%s]; got false", tsName) - } - if mySuggestions == nil { - t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", tsName) - } - - if len(mySuggestions) != 1 { - t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) - } - mySuggestion := mySuggestions[0] - if mySuggestion.Text != "goolang" { - t.Errorf("expected Text = 'goolang'; got %s", mySuggestion.Text) - } - if mySuggestion.Offset != 0 { - t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) - } - if mySuggestion.Length != 7 { - t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) - } - if len(mySuggestion.Options) != 1 { - t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) - } - myOption := mySuggestion.Options[0] - if myOption.Text != "golang" { - t.Errorf("expected Text = 'golang'; got %s", myOption.Text) - } - if myOption.Score == float64(0.0) { - t.Errorf("expected Score != 0.0; got %v", myOption.Score) - } - if myOption.Freq == 0 { - t.Errorf("expected Freq != 0; got %v", myOption.Freq) - } -} - -func TestPhraseSuggester(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - all := NewMatchAllQuery() - - phraseSuggesterName := "my-suggestions" - ps := NewPhraseSuggester(phraseSuggesterName) - ps = ps.Text("Goolang") - ps = ps.Field("message") - - searchResult, err := client.Search(). - Index(testIndexName). - Query(all). - Suggester(ps). 
- Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Suggest == nil { - t.Errorf("expected SearchResult.Suggest != nil; got nil") - } - mySuggestions, found := searchResult.Suggest[phraseSuggesterName] - if !found { - t.Errorf("expected to find SearchResult.Suggest[%s]; got false", phraseSuggesterName) - } - if mySuggestions == nil { - t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", phraseSuggesterName) - } - - if len(mySuggestions) != 1 { - t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) - } - mySuggestion := mySuggestions[0] - if mySuggestion.Text != "Goolang" { - t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text) - } - if mySuggestion.Offset != 0 { - t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) - } - if mySuggestion.Length != 7 { - t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) - } - /* - if len(mySuggestion.Options) != 1 { - t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) - } - myOption := mySuggestion.Options[0] - if myOption.Text != "golang" { - t.Errorf("expected Text = 'golang'; got %s", myOption.Text) - } - if myOption.Score == float64(0.0) { - t.Errorf("expected Score != 0.0; got %v", myOption.Score) - } - */ -} - -// TODO(oe): I get a "Completion suggester not supported" exception on 0.90.2?! -/* -func TestCompletionSuggester(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - all := NewMatchAllQuery() - - suggesterName := "my-suggestions" - cs := NewCompletionSuggester(suggesterName) - cs = cs.Text("Goolang") - cs = cs.Field("message") - - searchResult, err := client.Search(). - Index(testIndexName). - Query(&all). - Suggester(cs). 
- Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Suggest == nil { - t.Errorf("expected SearchResult.Suggest != nil; got nil") - } - mySuggestions, found := searchResult.Suggest[suggesterName] - if !found { - t.Errorf("expected to find SearchResult.Suggest[%s]; got false") - } - if mySuggestions == nil { - t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName) - } - - if len(mySuggestions) != 1 { - t.Errorf("expected 1 suggestion; got %d", len(mySuggestions)) - } - mySuggestion := mySuggestions[0] - if mySuggestion.Text != "Goolang" { - t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text) - } - if mySuggestion.Offset != 0 { - t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset) - } - if mySuggestion.Length != 7 { - t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length) - } - if len(mySuggestion.Options) != 1 { - t.Errorf("expected 1 option; got %d", len(mySuggestion.Options)) - } - myOption := mySuggestion.Options[0] - if myOption.Text != "golang" { - t.Errorf("expected Text = 'golang'; got %s", myOption.Text) - } - if myOption.Score == float64(0.0) { - t.Errorf("expected Score != 0.0; got %v", myOption.Score) - } -} -//*/ diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_template.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_template.go deleted file mode 100644 index 229a2712b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_template.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// PutTemplateService creates or updates a search template. -// The documentation can be found at -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html. -type PutTemplateService struct { - client *Client - pretty bool - id string - opType string - version *int - versionType string - bodyJson interface{} - bodyString string -} - -// NewPutTemplateService creates a new PutTemplateService. -func NewPutTemplateService(client *Client) *PutTemplateService { - return &PutTemplateService{ - client: client, - } -} - -// Id is the template ID. -func (s *PutTemplateService) Id(id string) *PutTemplateService { - s.id = id - return s -} - -// OpType is an explicit operation type. -func (s *PutTemplateService) OpType(opType string) *PutTemplateService { - s.opType = opType - return s -} - -// Version is an explicit version number for concurrency control. -func (s *PutTemplateService) Version(version int) *PutTemplateService { - s.version = &version - return s -} - -// VersionType is a specific version type. -func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService { - s.versionType = versionType - return s -} - -// BodyJson is the document as a JSON serializable object. -func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService { - s.bodyJson = body - return s -} - -// BodyString is the document as a string. -func (s *PutTemplateService) BodyString(body string) *PutTemplateService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
-func (s *PutTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.version != nil { - params.Set("version", fmt.Sprintf("%d", *s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - if s.opType != "" { - params.Set("op_type", s.opType) - } - - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *PutTemplateService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *PutTemplateService) Do() (*PutTemplateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest("PUT", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(PutTemplateResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// PutTemplateResponse is the response of PutTemplateService.Do. -type PutTemplateResponse struct { - Id string `json:"_id"` - Version int `json:"_version"` - Created bool `json:"created"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_templates_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_templates_test.go deleted file mode 100644 index 3f8bbcb65..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_templates_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
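For orientation while reading this removal: the PutTemplateService deleted above follows the client's usual buildURL/Validate/Do shape, and the test file deleted below drives it end to end. A minimal sketch of that round trip, distilled from the deleted test rather than new behavior; it assumes the package's test context (a `client` from setupTestClient(t), a reachable test cluster, and an arbitrary template id):

// Sketch: create, validate, and remove a stored search template.
// Assumes `client` and `t` from this package's test helpers.
tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`

// Stores the template via PUT /_search/template/elastic-test.
if _, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do(); err != nil {
	t.Fatal(err)
}

// Validate() guards Do(): with no Id and no body it reports the missing fields.
if err := NewPutTemplateService(client).Validate(); err == nil {
	t.Fatal("expected a missing-fields error")
}

// Removes the stored template again.
if _, err := client.DeleteTemplate().Id("elastic-test").Do(); err != nil {
	t.Fatal(err)
}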
- -package elastic - -import ( - "testing" -) - -func TestSearchTemplatesLifecycle(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Template - tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}` - - // Create template - cresp, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do() - if err != nil { - t.Fatal(err) - } - if cresp == nil { - t.Fatalf("expected response != nil; got: %v", cresp) - } - if !cresp.Created { - t.Errorf("expected created = %v; got: %v", true, cresp.Created) - } - - // Get template - resp, err := client.GetTemplate().Id("elastic-test").Do() - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatalf("expected response != nil; got: %v", resp) - } - if resp.Template == "" { - t.Errorf("expected template != %q; got: %q", "", resp.Template) - } - - // Delete template - dresp, err := client.DeleteTemplate().Id("elastic-test").Do() - if err != nil { - t.Fatal(err) - } - if dresp == nil { - t.Fatalf("expected response != nil; got: %v", dresp) - } - if !dresp.Found { - t.Fatalf("expected found = %v; got: %v", true, dresp.Found) - } -} - -func TestSearchTemplatesInlineQuery(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Run query with (inline) search template - // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html - tq := NewTemplateQuery(`{"match_{{template}}": {}}`).Var("template", "all") - resp, err := client.Search(testIndexName).Query(tq).Do() - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatalf("expected response != nil; got: %v", resp) - } - if resp.Hits == nil { - t.Fatalf("expected response hits != nil; got: %v", resp.Hits) - } - if resp.Hits.TotalHits != 3 { - t.Fatalf("expected 3 hits; got: %d", resp.Hits.TotalHits) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/search_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/search_test.go deleted file mode 100644 index 43a6695ff..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/search_test.go +++ /dev/null @@ -1,885 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - _ "net/http" - "reflect" - "testing" - "time" -) - -func TestSearchMatchAll(t *testing.T) { - //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags))) - client := setupTestClientAndCreateIndexAndAddDocs(t) - - // Match all should return all documents - searchResult, err := client.Search(). - Index(testIndexName). - Query(NewMatchAllQuery()). - Size(100). - Pretty(true). 
- Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if got, want := searchResult.Hits.TotalHits, int64(12); got != want { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got) - } - if got, want := len(searchResult.Hits.Hits), 12; got != want { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got) - } - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - } -} - -func BenchmarkSearchMatchAll(b *testing.B) { - client := setupTestClientAndCreateIndexAndAddDocs(b) - - for n := 0; n < b.N; n++ { - // Match all should return all documents - all := NewMatchAllQuery() - searchResult, err := client.Search().Index(testIndexName).Query(all).Do() - if err != nil { - b.Fatal(err) - } - if searchResult.Hits == nil { - b.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 4 { - b.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 4, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 4 { - b.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 4, len(searchResult.Hits.Hits)) - } - } -} - -func TestSearchResultTotalHits(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - count, err := client.Count(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - all := NewMatchAllQuery() - searchResult, err := client.Search().Index(testIndexName).Query(all).Do() - if err != nil { - t.Fatal(err) - } - - got := searchResult.TotalHits() - if got != count { - t.Fatalf("expected %d hits; got: %d", count, got) - } - - // No hits - searchResult = &SearchResult{} - got = searchResult.TotalHits() - if got != 0 { - t.Errorf("expected %d hits; got: %d", 0, got) - } -} - -func TestSearchResultEach(t *testing.T) { - client := setupTestClientAndCreateIndexAndAddDocs(t) - - all := NewMatchAllQuery() - searchResult, err := client.Search().Index(testIndexName).Query(all).Do() - if err != nil { - t.Fatal(err) - } - - // Iterate over non-ptr type - var aTweet tweet - count := 0 - for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { - count++ - _, ok := item.(tweet) - if !ok { - t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item)) - } - } - if count == 0 { - t.Errorf("expected to find some hits; got: %d", count) - } - - // Iterate over ptr-type - count = 0 - var aTweetPtr *tweet - for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) { - count++ - tw, ok := item.(*tweet) - if !ok { - t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item)) - } - if tw == nil { - t.Fatal("expected hit to not be nil") - } - } - if count == 0 { - t.Errorf("expected to find some hits; got: %d", count) - } - - // Does not iterate when no hits are found - searchResult = &SearchResult{Hits: nil} - count = 0 - for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { - count++ - _ = item - } - if count != 0 { - t.Errorf("expected to not find any hits; got: %d", count) - } - searchResult = &SearchResult{Hits: &SearchHits{Hits: make([]*SearchHit, 0)}} - count = 0 - for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) { - count++ - _ = item - } - if count != 0 { - t.Errorf("expected to not find any 
hits; got: %d", count) - } -} - -func TestSearchSorting(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{ - User: "olivere", Retweets: 108, - Message: "Welcome to Golang and Elasticsearch.", - Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), - } - tweet2 := tweet{ - User: "olivere", Retweets: 0, - Message: "Another unrelated topic.", - Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), - } - tweet3 := tweet{ - User: "sandrae", Retweets: 12, - Message: "Cycling is fun.", - Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), - } - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - all := NewMatchAllQuery() - searchResult, err := client.Search(). - Index(testIndexName). - Query(all). - Sort("created", false). - Timeout("1s"). - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 3 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 3 { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) - } - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - } -} - -func TestSearchSortingBySorters(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{ - User: "olivere", Retweets: 108, - Message: "Welcome to Golang and Elasticsearch.", - Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), - } - tweet2 := tweet{ - User: "olivere", Retweets: 0, - Message: "Another unrelated topic.", - Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), - } - tweet3 := tweet{ - User: "sandrae", Retweets: 12, - Message: "Cycling is fun.", - Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), - } - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - all := NewMatchAllQuery() - searchResult, err := client.Search(). - Index(testIndexName). - Query(all). - SortBy(NewFieldSort("created").Desc(), NewScoreSort()). - Timeout("1s"). 
- Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 3 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 3 { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) - } - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - item := make(map[string]interface{}) - err := json.Unmarshal(*hit.Source, &item) - if err != nil { - t.Fatal(err) - } - } -} - -func TestSearchSpecificFields(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - all := NewMatchAllQuery() - searchResult, err := client.Search(). - Index(testIndexName). - Query(all). - Fields("message"). - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 3 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 3 { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) - } - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - if hit.Source != nil { - t.Fatalf("expected SearchResult.Hits.Hit.Source to be nil; got: %q", hit.Source) - } - if hit.Fields == nil { - t.Fatal("expected SearchResult.Hits.Hit.Fields to be != nil") - } - field, found := hit.Fields["message"] - if !found { - t.Errorf("expected SearchResult.Hits.Hit.Fields[%s] to be found", "message") - } - fields, ok := field.([]interface{}) - if !ok { - t.Errorf("expected []interface{}; got: %v", reflect.TypeOf(fields)) - } - if len(fields) != 1 { - t.Errorf("expected a field with 1 entry; got: %d", len(fields)) - } - message, ok := fields[0].(string) - if !ok { - t.Errorf("expected a string; got: %v", reflect.TypeOf(fields[0])) - } - if message == "" { - t.Errorf("expected a message; got: %q", message) - } - } -} - -func TestSearchExplain(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{ - User: "olivere", Retweets: 108, - Message: "Welcome to Golang and Elasticsearch.", - Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), - } - tweet2 := tweet{ - User: "olivere", Retweets: 0, - Message: "Another unrelated topic.", - Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), - } - tweet3 := tweet{ - User: "sandrae", Retweets: 12, - Message: "Cycling 
is fun.", - Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), - } - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Match all should return all documents - all := NewMatchAllQuery() - searchResult, err := client.Search(). - Index(testIndexName). - Query(all). - Explain(true). - Timeout("1s"). - // Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 3 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 3 { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) - } - - for _, hit := range searchResult.Hits.Hits { - if hit.Index != testIndexName { - t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index) - } - if hit.Explanation == nil { - t.Fatal("expected search explanation") - } - if hit.Explanation.Value <= 0.0 { - t.Errorf("expected explanation value to be > 0.0; got: %v", hit.Explanation.Value) - } - if hit.Explanation.Description == "" { - t.Errorf("expected explanation description != %q; got: %q", "", hit.Explanation.Description) - } - } -} - -func TestSearchSource(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{ - User: "olivere", Retweets: 108, - Message: "Welcome to Golang and Elasticsearch.", - Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), - } - tweet2 := tweet{ - User: "olivere", Retweets: 0, - Message: "Another unrelated topic.", - Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), - } - tweet3 := tweet{ - User: "sandrae", Retweets: 12, - Message: "Cycling is fun.", - Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), - } - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Set up the request JSON manually to pass to the search service via Source() - source := map[string]interface{}{ - "query": map[string]interface{}{ - "match_all": map[string]interface{}{}, - }, - } - - searchResult, err := client.Search(). - Index(testIndexName). - Source(source). 
// sets the JSON request - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 3 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) - } -} - -func TestSearchSearchSource(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{ - User: "olivere", Retweets: 108, - Message: "Welcome to Golang and Elasticsearch.", - Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), - } - tweet2 := tweet{ - User: "olivere", Retweets: 0, - Message: "Another unrelated topic.", - Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), - } - tweet3 := tweet{ - User: "sandrae", Retweets: 12, - Message: "Cycling is fun.", - Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), - } - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Set up the search source manually and pass it to the search service via SearchSource() - ss := NewSearchSource().Query(NewMatchAllQuery()).From(0).Size(2) - - // One can use ss.Source() to get to the raw interface{} that will be used - // as the search request JSON by the SearchService. - - searchResult, err := client.Search(). - Index(testIndexName). - SearchSource(ss). // sets the SearchSource - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 3 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 2 { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits)) - } -} - -func TestSearchInnerHitsOnHasChild(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Check for valid ES version - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if esversion < "1.5.0" { - t.Skip("InnerHits feature is only available for Elasticsearch 1.5+") - return - } - - tweet1 := tweet{ - User: "olivere", Retweets: 108, - Message: "Welcome to Golang and Elasticsearch.", - Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), - } - tweet2 := tweet{ - User: "olivere", Retweets: 0, - Message: "Another unrelated topic.", - Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), - } - comment2a := comment{User: "sandrae", Comment: "What does that even mean?"} - tweet3 := tweet{ - User: "sandrae", Retweets: 12, - Message: "Cycling is fun.", - Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), - } - comment3a := comment{User: "nico", Comment: "You bet."} - comment3b := comment{User: "olivere", Comment: "It sure is."} - - // Add all documents - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - _, err = 
client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - bq := NewBoolQuery() - bq = bq.Must(NewMatchAllQuery()) - bq = bq.Filter(NewHasChildQuery("comment", NewMatchAllQuery()). - InnerHit(NewInnerHit().Name("comments"))) - - searchResult, err := client.Search(). - Index(testIndexName). - Query(bq). - Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 2 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 2, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 2 { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits)) - } - - hit := searchResult.Hits.Hits[0] - if hit.Id != "t2" { - t.Fatalf("expected tweet %q; got: %q", "t2", hit.Id) - } - if hit.InnerHits == nil { - t.Fatalf("expected inner hits; got: %v", hit.InnerHits) - } - if len(hit.InnerHits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) - } - innerHits, found := hit.InnerHits["comments"] - if !found { - t.Fatalf("expected inner hits for name %q", "comments") - } - if innerHits == nil || innerHits.Hits == nil { - t.Fatal("expected inner hits != nil") - } - if len(innerHits.Hits.Hits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) - } - if innerHits.Hits.Hits[0].Id != "c2a" { - t.Fatalf("expected inner hit with id %q; got: %q", "c2a", innerHits.Hits.Hits[0].Id) - } - - hit = searchResult.Hits.Hits[1] - if hit.Id != "t3" { - t.Fatalf("expected tweet %q; got: %q", "t3", hit.Id) - } - if hit.InnerHits == nil { - t.Fatalf("expected inner hits; got: %v", hit.InnerHits) - } - if len(hit.InnerHits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) - } - innerHits, found = hit.InnerHits["comments"] - if !found { - t.Fatalf("expected inner hits for name %q", "comments") - } - if innerHits == nil || innerHits.Hits == nil { - t.Fatal("expected inner hits != nil") - } - if len(innerHits.Hits.Hits) != 2 { - t.Fatalf("expected %d inner hits; got: %d", 2, len(innerHits.Hits.Hits)) - } - if innerHits.Hits.Hits[0].Id != "c3a" { - t.Fatalf("expected inner hit with id %q; got: %q", "c3a", innerHits.Hits.Hits[0].Id) - } - if innerHits.Hits.Hits[1].Id != "c3b" { - t.Fatalf("expected inner hit with id %q; got: %q", "c3b", innerHits.Hits.Hits[1].Id) - } -} - -func TestSearchInnerHitsOnHasParent(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Check for valid ES version - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if esversion < "1.5.0" { - t.Skip("InnerHits feature is only available for Elasticsearch 1.5+") - return - } - - tweet1 := tweet{ - User: "olivere", Retweets: 108, - Message: "Welcome to Golang and Elasticsearch.", - Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC), - } - tweet2 := 
tweet{ - User: "olivere", Retweets: 0, - Message: "Another unrelated topic.", - Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC), - } - comment2a := comment{User: "sandrae", Comment: "What does that even mean?"} - tweet3 := tweet{ - User: "sandrae", Retweets: 12, - Message: "Cycling is fun.", - Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC), - } - comment3a := comment{User: "nico", Comment: "You bet."} - comment3b := comment{User: "olivere", Comment: "It sure is."} - - // Add all documents - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - bq := NewBoolQuery() - bq = bq.Must(NewMatchAllQuery()) - bq = bq.Filter(NewHasParentQuery("tweet", NewMatchAllQuery()). - InnerHit(NewInnerHit().Name("tweets"))) - - searchResult, err := client.Search(). - Index(testIndexName). - Query(bq). - Pretty(true). - Do() - if err != nil { - t.Fatal(err) - } - if searchResult.Hits == nil { - t.Errorf("expected SearchResult.Hits != nil; got nil") - } - if searchResult.Hits.TotalHits != 3 { - t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits) - } - if len(searchResult.Hits.Hits) != 3 { - t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits)) - } - - hit := searchResult.Hits.Hits[0] - if hit.Id != "c2a" { - t.Fatalf("expected tweet %q; got: %q", "c2a", hit.Id) - } - if hit.InnerHits == nil { - t.Fatalf("expected inner hits; got: %v", hit.InnerHits) - } - if len(hit.InnerHits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) - } - innerHits, found := hit.InnerHits["tweets"] - if !found { - t.Fatalf("expected inner hits for name %q", "tweets") - } - if innerHits == nil || innerHits.Hits == nil { - t.Fatal("expected inner hits != nil") - } - if len(innerHits.Hits.Hits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) - } - if innerHits.Hits.Hits[0].Id != "t2" { - t.Fatalf("expected inner hit with id %q; got: %q", "t2", innerHits.Hits.Hits[0].Id) - } - - hit = searchResult.Hits.Hits[1] - if hit.Id != "c3a" { - t.Fatalf("expected tweet %q; got: %q", "c3a", hit.Id) - } - if hit.InnerHits == nil { - t.Fatalf("expected inner hits; got: %v", hit.InnerHits) - } - if len(hit.InnerHits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) - } - innerHits, found = hit.InnerHits["tweets"] - if !found { - t.Fatalf("expected inner hits for name %q", "tweets") - } - if innerHits == nil || innerHits.Hits == nil { - t.Fatal("expected inner hits != nil") - } - if len(innerHits.Hits.Hits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) - } - if 
innerHits.Hits.Hits[0].Id != "t3" { - t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id) - } - - hit = searchResult.Hits.Hits[2] - if hit.Id != "c3b" { - t.Fatalf("expected tweet %q; got: %q", "c3b", hit.Id) - } - if hit.InnerHits == nil { - t.Fatalf("expected inner hits; got: %v", hit.InnerHits) - } - if len(hit.InnerHits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits)) - } - innerHits, found = hit.InnerHits["tweets"] - if !found { - t.Fatalf("expected inner hits for name %q", "tweets") - } - if innerHits == nil || innerHits.Hits == nil { - t.Fatal("expected inner hits != nil") - } - if len(innerHits.Hits.Hits) != 1 { - t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits)) - } - if innerHits.Hits.Hits[0].Id != "t3" { - t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/setup_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/setup_test.go deleted file mode 100644 index 97af2bb27..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/setup_test.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "log" - "math/rand" - "os" - "time" -) - -const ( - testIndexName = "elastic-test" - testIndexName2 = "elastic-test2" - testMapping = ` -{ - "settings":{ - "number_of_shards":1, - "number_of_replicas":0 - }, - "mappings":{ - "_default_": { - "_timestamp": { - "enabled": true - }, - "_ttl": { - "enabled": true - } - }, - "tweet":{ - "properties":{ - "tags":{ - "type":"string" - }, - "location":{ - "type":"geo_point" - }, - "suggest_field":{ - "type":"completion", - "payloads":true - } - } - }, - "comment":{ - "_parent": { - "type": "tweet" - } - }, - "order":{ - "properties":{ - "article":{ - "type":"string" - }, - "manufacturer":{ - "type":"string", - "index" : "not_analyzed" - }, - "price":{ - "type":"float" - }, - "time":{ - "type":"date", - "format": "YYYY-MM-dd" - } - } - } - } -} -` -) - -type tweet struct { - User string `json:"user"` - Message string `json:"message"` - Retweets int `json:"retweets"` - Image string `json:"image,omitempty"` - Created time.Time `json:"created,omitempty"` - Tags []string `json:"tags,omitempty"` - Location string `json:"location,omitempty"` - Suggest *SuggestField `json:"suggest_field,omitempty"` -} - -func (t tweet) String() string { - return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets) -} - -type comment struct { - User string `json:"user"` - Comment string `json:"comment"` - Created time.Time `json:"created,omitempty"` -} - -func (c comment) String() string { - return fmt.Sprintf("comment{User:%q,Comment:%q}", c.User, c.Comment) -} - -type order struct { - Article string `json:"article"` - Manufacturer string `json:"manufacturer"` - Price float64 `json:"price"` - Time string `json:"time,omitempty"` -} - -func (o order) String() string { - return fmt.Sprintf("order{Article:%q,Manufacturer:%q,Price:%v,Time:%v}", o.Article, o.Manufacturer, o.Price, o.Time) -} - -func isTravis() bool { - return os.Getenv("TRAVIS") != "" -} - -func travisGoVersion() string { - return os.Getenv("TRAVIS_GO_VERSION") -} - -type logger interface { - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - 
Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fail() - FailNow() - Log(args ...interface{}) - Logf(format string, args ...interface{}) -} - -func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) { - var err error - - client, err = NewClient(options...) - if err != nil { - t.Fatal(err) - } - - client.DeleteIndex(testIndexName).Do() - client.DeleteIndex(testIndexName2).Do() - - return client -} - -func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client { - client := setupTestClient(t, options...) - - // Create index - createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do() - if err != nil { - t.Fatal(err) - } - if createIndex == nil { - t.Errorf("expected result to be != nil; got: %v", createIndex) - } - - // Create second index - createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do() - if err != nil { - t.Fatal(err) - } - if createIndex2 == nil { - t.Errorf("expected result to be != nil; got: %v", createIndex2) - } - - return client -} - -func setupTestClientAndCreateIndexAndLog(t logger, options ...ClientOptionFunc) *Client { - return setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0))) -} - -func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client { - client := setupTestClientAndCreateIndex(t, options...) - - // Add tweets - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."} - tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."} - comment1 := comment{User: "nico", Comment: "You bet."} - - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do() - if err != nil { - t.Fatal(err) - } - - // Add orders - var orders []order - orders = append(orders, order{Article: "Apple MacBook", Manufacturer: "Apple", Price: 1290, Time: "2015-01-18"}) - orders = append(orders, order{Article: "Paper", Manufacturer: "Canon", Price: 100, Time: "2015-03-01"}) - orders = append(orders, order{Article: "Apple iPad", Manufacturer: "Apple", Price: 499, Time: "2015-04-12"}) - orders = append(orders, order{Article: "Dell XPS 13", Manufacturer: "Dell", Price: 1600, Time: "2015-04-18"}) - orders = append(orders, order{Article: "Apple Watch", Manufacturer: "Apple", Price: 349, Time: "2015-04-29"}) - orders = append(orders, order{Article: "Samsung TV", Manufacturer: "Samsung", Price: 790, Time: "2015-05-03"}) - orders = append(orders, order{Article: "Hoodie", Manufacturer: "h&m", Price: 49, Time: "2015-06-03"}) - orders = append(orders, order{Article: "T-Shirt", Manufacturer: "h&m", Price: 19, Time: "2015-06-18"}) - for i, o := range orders { - id := fmt.Sprintf("%d", i) - _, err = client.Index().Index(testIndexName).Type("order").Id(id).BodyJson(&o).Do() - if err != nil { - t.Fatal(err) - } - } - - // Flush - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - return client -} - -var letters = 
[]rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -func randomString(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] - } - return string(b) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/sort.go b/services/templeton/vendor/src/github.com/olivere/elastic/sort.go deleted file mode 100644 index 4c845c505..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/sort.go +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// -- Sorter -- - -// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html. -type Sorter interface { - Source() (interface{}, error) -} - -// -- SortInfo -- - -// SortInfo contains information about sorting a field. -type SortInfo struct { - Sorter - Field string - Ascending bool - Missing interface{} - IgnoreUnmapped *bool - SortMode string - NestedFilter Query - NestedPath string -} - -func (info SortInfo) Source() (interface{}, error) { - prop := make(map[string]interface{}) - if info.Ascending { - prop["order"] = "asc" - } else { - prop["order"] = "desc" - } - if info.Missing != nil { - prop["missing"] = info.Missing - } - if info.IgnoreUnmapped != nil { - prop["ignore_unmapped"] = *info.IgnoreUnmapped - } - if info.SortMode != "" { - prop["sort_mode"] = info.SortMode - } - if info.NestedFilter != nil { - prop["nested_filter"] = info.NestedFilter - } - if info.NestedPath != "" { - prop["nested_path"] = info.NestedPath - } - source := make(map[string]interface{}) - source[info.Field] = prop - return source, nil -} - -// -- ScoreSort -- - -// ScoreSort sorts by relevancy score. -type ScoreSort struct { - Sorter - ascending bool -} - -// NewScoreSort creates a new ScoreSort. -func NewScoreSort() ScoreSort { - return ScoreSort{ascending: false} // Descending by default! -} - -// Order defines whether sorting ascending (default) or descending. -func (s ScoreSort) Order(ascending bool) ScoreSort { - s.ascending = ascending - return s -} - -// Asc sets ascending sort order. -func (s ScoreSort) Asc() ScoreSort { - s.ascending = true - return s -} - -// Desc sets descending sort order. -func (s ScoreSort) Desc() ScoreSort { - s.ascending = false - return s -} - -// Source returns the JSON-serializable data. -func (s ScoreSort) Source() (interface{}, error) { - source := make(map[string]interface{}) - x := make(map[string]interface{}) - source["_score"] = x - if s.ascending { - x["reverse"] = true - } - return source, nil -} - -// -- FieldSort -- - -// FieldSort sorts by a given field. -type FieldSort struct { - Sorter - fieldName string - ascending bool - missing interface{} - ignoreUnmapped *bool - unmappedType *string - sortMode *string - nestedFilter Query - nestedPath *string -} - -// NewFieldSort creates a new FieldSort. -func NewFieldSort(fieldName string) FieldSort { - return FieldSort{ - fieldName: fieldName, - ascending: true, - } -} - -// FieldName specifies the name of the field to be used for sorting. -func (s FieldSort) FieldName(fieldName string) FieldSort { - s.fieldName = fieldName - return s -} - -// Order defines whether sorting ascending (default) or descending. 
-func (s FieldSort) Order(ascending bool) FieldSort { - s.ascending = ascending - return s -} - -// Asc sets ascending sort order. -func (s FieldSort) Asc() FieldSort { - s.ascending = true - return s -} - -// Desc sets descending sort order. -func (s FieldSort) Desc() FieldSort { - s.ascending = false - return s -} - -// Missing sets the value to be used when a field is missing in a document. -// You can also use "_last" or "_first" to sort missing last or first -// respectively. -func (s FieldSort) Missing(missing interface{}) FieldSort { - s.missing = missing - return s -} - -// IgnoreUnmapped specifies what happens if the field does not exist in -// the index. Set it to true to ignore, or set it to false to not ignore (default). -func (s FieldSort) IgnoreUnmapped(ignoreUnmapped bool) FieldSort { - s.ignoreUnmapped = &ignoreUnmapped - return s -} - -// UnmappedType sets the type to use when the current field is not mapped -// in an index. -func (s FieldSort) UnmappedType(typ string) FieldSort { - s.unmappedType = &typ - return s -} - -// SortMode specifies what values to pick in case a document contains -// multiple values for the targeted sort field. Possible values are: -// min, max, sum, and avg. -func (s FieldSort) SortMode(sortMode string) FieldSort { - s.sortMode = &sortMode - return s -} - -// NestedFilter sets a filter that nested objects should match with -// in order to be taken into account for sorting. -func (s FieldSort) NestedFilter(nestedFilter Query) FieldSort { - s.nestedFilter = nestedFilter - return s -} - -// NestedPath is used if sorting occurs on a field that is inside a -// nested object. -func (s FieldSort) NestedPath(nestedPath string) FieldSort { - s.nestedPath = &nestedPath - return s -} - -// Source returns the JSON-serializable data. -func (s FieldSort) Source() (interface{}, error) { - source := make(map[string]interface{}) - x := make(map[string]interface{}) - source[s.fieldName] = x - if s.ascending { - x["order"] = "asc" - } else { - x["order"] = "desc" - } - if s.missing != nil { - x["missing"] = s.missing - } - if s.ignoreUnmapped != nil { - x["ignore_unmapped"] = *s.ignoreUnmapped - } - if s.unmappedType != nil { - x["unmapped_type"] = *s.unmappedType - } - if s.sortMode != nil { - x["mode"] = *s.sortMode - } - if s.nestedFilter != nil { - src, err := s.nestedFilter.Source() - if err != nil { - return nil, err - } - x["nested_filter"] = src - } - if s.nestedPath != nil { - x["nested_path"] = *s.nestedPath - } - return source, nil -} - -// -- GeoDistanceSort -- - -// GeoDistanceSort allows for sorting by geographic distance. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. -type GeoDistanceSort struct { - Sorter - fieldName string - points []*GeoPoint - geohashes []string - geoDistance *string - unit string - ascending bool - sortMode *string - nestedFilter Query - nestedPath *string -} - -// NewGeoDistanceSort creates a new sorter for geo distances. -func NewGeoDistanceSort(fieldName string) GeoDistanceSort { - return GeoDistanceSort{ - fieldName: fieldName, - points: make([]*GeoPoint, 0), - geohashes: make([]string, 0), - ascending: true, - } -} - -// FieldName specifies the name of the (geo) field to use for sorting. -func (s GeoDistanceSort) FieldName(fieldName string) GeoDistanceSort { - s.fieldName = fieldName - return s -} - -// Order defines whether sorting ascending (default) or descending. 
-func (s GeoDistanceSort) Order(ascending bool) GeoDistanceSort { - s.ascending = ascending - return s -} - -// Asc sets ascending sort order. -func (s GeoDistanceSort) Asc() GeoDistanceSort { - s.ascending = true - return s -} - -// Desc sets descending sort order. -func (s GeoDistanceSort) Desc() GeoDistanceSort { - s.ascending = false - return s -} - -// Point specifies a point to create the range distance aggregations from. -func (s GeoDistanceSort) Point(lat, lon float64) GeoDistanceSort { - s.points = append(s.points, GeoPointFromLatLon(lat, lon)) - return s -} - -// Points specifies the geo point(s) to create the range distance aggregations from. -func (s GeoDistanceSort) Points(points ...*GeoPoint) GeoDistanceSort { - s.points = append(s.points, points...) - return s -} - -// GeoHashes specifies the geo point to create the range distance aggregations from. -func (s GeoDistanceSort) GeoHashes(geohashes ...string) GeoDistanceSort { - s.geohashes = append(s.geohashes, geohashes...) - return s -} - -// GeoDistance represents how to compute the distance. -// It can be sloppy_arc (default), arc, or plane. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting. -func (s GeoDistanceSort) GeoDistance(geoDistance string) GeoDistanceSort { - s.geoDistance = &geoDistance - return s -} - -// Unit specifies the distance unit to use. It defaults to km. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#distance-units -// for details. -func (s GeoDistanceSort) Unit(unit string) GeoDistanceSort { - s.unit = unit - return s -} - -// SortMode specifies what values to pick in case a document contains -// multiple values for the targeted sort field. Possible values are: -// min, max, sum, and avg. -func (s GeoDistanceSort) SortMode(sortMode string) GeoDistanceSort { - s.sortMode = &sortMode - return s -} - -// NestedFilter sets a filter that nested objects should match with -// in order to be taken into account for sorting. -func (s GeoDistanceSort) NestedFilter(nestedFilter Query) GeoDistanceSort { - s.nestedFilter = nestedFilter - return s -} - -// NestedPath is used if sorting occurs on a field that is inside a -// nested object. -func (s GeoDistanceSort) NestedPath(nestedPath string) GeoDistanceSort { - s.nestedPath = &nestedPath - return s -} - -// Source returns the JSON-serializable data. -func (s GeoDistanceSort) Source() (interface{}, error) { - source := make(map[string]interface{}) - x := make(map[string]interface{}) - source["_geo_distance"] = x - - // Points - ptarr := make([]interface{}, 0) - for _, pt := range s.points { - ptarr = append(ptarr, pt.Source()) - } - for _, geohash := range s.geohashes { - ptarr = append(ptarr, geohash) - } - x[s.fieldName] = ptarr - - if s.unit != "" { - x["unit"] = s.unit - } - if s.geoDistance != nil { - x["distance_type"] = *s.geoDistance - } - - if !s.ascending { - x["reverse"] = true - } - if s.sortMode != nil { - x["mode"] = *s.sortMode - } - if s.nestedFilter != nil { - src, err := s.nestedFilter.Source() - if err != nil { - return nil, err - } - x["nested_filter"] = src - } - if s.nestedPath != nil { - x["nested_path"] = *s.nestedPath - } - return source, nil -} - -// -- ScriptSort -- - -// ScriptSort sorts by a custom script. See -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting -// for details about scripting. 
-type ScriptSort struct { - Sorter - script *Script - typ string - ascending bool - sortMode *string - nestedFilter Query - nestedPath *string -} - -// NewScriptSort creates and initializes a new ScriptSort. -// You must provide a script and a type, e.g. "string" or "number". -func NewScriptSort(script *Script, typ string) ScriptSort { - return ScriptSort{ - script: script, - typ: typ, - ascending: true, - } -} - -// Type sets the script type, which can be either "string" or "number". -func (s ScriptSort) Type(typ string) ScriptSort { - s.typ = typ - return s -} - -// Order defines whether sorting ascending (default) or descending. -func (s ScriptSort) Order(ascending bool) ScriptSort { - s.ascending = ascending - return s -} - -// Asc sets ascending sort order. -func (s ScriptSort) Asc() ScriptSort { - s.ascending = true - return s -} - -// Desc sets descending sort order. -func (s ScriptSort) Desc() ScriptSort { - s.ascending = false - return s -} - -// SortMode specifies what values to pick in case a document contains -// multiple values for the targeted sort field. Possible values are: -// min or max. -func (s ScriptSort) SortMode(sortMode string) ScriptSort { - s.sortMode = &sortMode - return s -} - -// NestedFilter sets a filter that nested objects should match with -// in order to be taken into account for sorting. -func (s ScriptSort) NestedFilter(nestedFilter Query) ScriptSort { - s.nestedFilter = nestedFilter - return s -} - -// NestedPath is used if sorting occurs on a field that is inside a -// nested object. -func (s ScriptSort) NestedPath(nestedPath string) ScriptSort { - s.nestedPath = &nestedPath - return s -} - -// Source returns the JSON-serializable data. -func (s ScriptSort) Source() (interface{}, error) { - if s.script == nil { - return nil, errors.New("ScriptSort expected a script") - } - source := make(map[string]interface{}) - x := make(map[string]interface{}) - source["_script"] = x - - src, err := s.script.Source() - if err != nil { - return nil, err - } - x["script"] = src - - x["type"] = s.typ - - if !s.ascending { - x["reverse"] = true - } - if s.sortMode != nil { - x["mode"] = *s.sortMode - } - if s.nestedFilter != nil { - src, err := s.nestedFilter.Source() - if err != nil { - return nil, err - } - x["nested_filter"] = src - } - if s.nestedPath != nil { - x["nested_path"] = *s.nestedPath - } - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/sort_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/sort_test.go deleted file mode 100644 index a0f9ddfc8..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/sort_test.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
-
-package elastic
-
-import (
-	"encoding/json"
-	"testing"
-)
-
-func TestSortInfo(t *testing.T) {
-	builder := SortInfo{Field: "grade", Ascending: false}
-	src, err := builder.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"grade":{"order":"desc"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestScoreSort(t *testing.T) {
-	builder := NewScoreSort()
-	if builder.ascending != false {
-		t.Error("expected score sorter to be descending by default")
-	}
-	src, err := builder.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"_score":{}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestScoreSortOrderAscending(t *testing.T) {
-	builder := NewScoreSort().Asc()
-	src, err := builder.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"_score":{"reverse":true}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestScoreSortOrderDescending(t *testing.T) {
-	builder := NewScoreSort().Desc()
-	src, err := builder.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"_score":{}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestFieldSort(t *testing.T) {
-	builder := NewFieldSort("grade")
-	src, err := builder.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"grade":{"order":"asc"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestFieldSortOrderDesc(t *testing.T) {
-	builder := NewFieldSort("grade").Desc()
-	src, err := builder.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"grade":{"order":"desc"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestFieldSortComplex(t *testing.T) {
-	builder := NewFieldSort("price").Desc().
-		SortMode("avg").
-		Missing("_last").
-		UnmappedType("product").
-		NestedFilter(NewTermQuery("product.color", "blue")).
-		NestedPath("variant")
-	src, err := builder.Source()
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := json.Marshal(src)
-	if err != nil {
-		t.Fatalf("marshaling to JSON failed: %v", err)
-	}
-	got := string(data)
-	expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc","unmapped_type":"product"}}`
-	if got != expected {
-		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
-	}
-}
-
-func TestGeoDistanceSort(t *testing.T) {
-	builder := NewGeoDistanceSort("pin.location").
-		Point(-70, 40).
-		Order(true).
-		Unit("km").
-		SortMode("min").
- GeoDistance("sloppy_arc") - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestGeoDistanceSortOrderDesc(t *testing.T) { - builder := NewGeoDistanceSort("pin.location"). - Point(-70, 40). - Unit("km"). - SortMode("min"). - GeoDistance("sloppy_arc"). - Desc() - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","pin.location":[{"lat":-70,"lon":40}],"reverse":true,"unit":"km"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} -func TestScriptSort(t *testing.T) { - builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Order(true) - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"_script":{"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestScriptSortOrderDesc(t *testing.T) { - builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Desc() - src, err := builder.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"_script":{"reverse":true,"script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggest.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggest.go deleted file mode 100644 index 1fb48ac0b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggest.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// SuggestService returns suggestions for text. -type SuggestService struct { - client *Client - pretty bool - routing string - preference string - indices []string - suggesters []Suggester -} - -func NewSuggestService(client *Client) *SuggestService { - builder := &SuggestService{ - client: client, - indices: make([]string, 0), - suggesters: make([]Suggester, 0), - } - return builder -} - -func (s *SuggestService) Index(indices ...string) *SuggestService { - s.indices = append(s.indices, indices...) 
- return s -} - -func (s *SuggestService) Pretty(pretty bool) *SuggestService { - s.pretty = pretty - return s -} - -func (s *SuggestService) Routing(routing string) *SuggestService { - s.routing = routing - return s -} - -func (s *SuggestService) Preference(preference string) *SuggestService { - s.preference = preference - return s -} - -func (s *SuggestService) Suggester(suggester Suggester) *SuggestService { - s.suggesters = append(s.suggesters, suggester) - return s -} - -func (s *SuggestService) Do() (SuggestResult, error) { - // Build url - path := "/" - - // Indices part - indexPart := make([]string, 0) - for _, index := range s.indices { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": index, - }) - if err != nil { - return nil, err - } - indexPart = append(indexPart, index) - } - path += strings.Join(indexPart, ",") - - // Suggest - path += "/_suggest" - - // Parameters - params := make(url.Values) - if s.pretty { - params.Set("pretty", fmt.Sprintf("%v", s.pretty)) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - - // Set body - body := make(map[string]interface{}) - for _, s := range s.suggesters { - src, err := s.Source(false) - if err != nil { - return nil, err - } - body[s.Name()] = src - } - - // Get response - res, err := s.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // There is a _shard object that cannot be deserialized. - // So we use json.RawMessage instead. - var suggestions map[string]*json.RawMessage - if err := json.Unmarshal(res.Body, &suggestions); err != nil { - return nil, err - } - - ret := make(SuggestResult) - for name, result := range suggestions { - if name != "_shards" { - var s []Suggestion - if err := json.Unmarshal(*result, &s); err != nil { - return nil, err - } - ret[name] = s - } - } - - return ret, nil -} - -type SuggestResult map[string][]Suggestion - -type Suggestion struct { - Text string `json:"text"` - Offset int `json:"offset"` - Length int `json:"length"` - Options []suggestionOption `json:"options"` -} - -type suggestionOption struct { - Text string `json:"text"` - Score float64 `json:"score"` - Freq int `json:"freq"` - Payload interface{} `json:"payload"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field.go deleted file mode 100644 index 4738d9910..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" -) - -// SuggestField can be used by the caller to specify a suggest field -// at index time. For a detailed example, see e.g. -// http://www.elasticsearch.org/blog/you-complete-me/. -type SuggestField struct { - inputs []string - output *string - payload interface{} - weight int - contextQueries []SuggesterContextQuery -} - -func NewSuggestField() *SuggestField { - return &SuggestField{weight: -1} -} - -func (f *SuggestField) Input(input ...string) *SuggestField { - if f.inputs == nil { - f.inputs = make([]string, 0) - } - f.inputs = append(f.inputs, input...) 
- return f -} - -func (f *SuggestField) Output(output string) *SuggestField { - f.output = &output - return f -} - -func (f *SuggestField) Payload(payload interface{}) *SuggestField { - f.payload = payload - return f -} - -func (f *SuggestField) Weight(weight int) *SuggestField { - f.weight = weight - return f -} - -func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField { - f.contextQueries = append(f.contextQueries, queries...) - return f -} - -// MarshalJSON encodes SuggestField into JSON. -func (f *SuggestField) MarshalJSON() ([]byte, error) { - source := make(map[string]interface{}) - - if f.inputs != nil { - switch len(f.inputs) { - case 1: - source["input"] = f.inputs[0] - default: - source["input"] = f.inputs - } - } - - if f.output != nil { - source["output"] = *f.output - } - - if f.payload != nil { - source["payload"] = f.payload - } - - if f.weight >= 0 { - source["weight"] = f.weight - } - - switch len(f.contextQueries) { - case 0: - case 1: - src, err := f.contextQueries[0].Source() - if err != nil { - return nil, err - } - source["context"] = src - default: - var ctxq []interface{} - for _, query := range f.contextQueries { - src, err := query.Source() - if err != nil { - return nil, err - } - ctxq = append(ctxq, src) - } - source["context"] = ctxq - } - - return json.Marshal(source) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field_test.go deleted file mode 100644 index b01cf0af0..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_field_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestSuggestField(t *testing.T) { - field := NewSuggestField(). - Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). - Output("Golang and Elasticsearch: An introduction."). - Weight(1). - ContextQuery( - NewSuggesterCategoryMapping("color").FieldName("color_field").DefaultValues("red", "green", "blue"), - NewSuggesterGeoMapping("location").Precision("5m").Neighbors(true).DefaultLocations(GeoPointFromLatLon(52.516275, 13.377704)), - ) - data, err := json.Marshal(field) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"context":[{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"}},{"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}}],"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"output":"Golang and Elasticsearch: An introduction.","weight":1}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggest_test.go deleted file mode 100644 index 50a4a0952..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggest_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - _ "net/http" - "testing" -) - -func TestSuggestService(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{ - User: "olivere", - Message: "Welcome to Golang and Elasticsearch.", - Tags: []string{"golang", "elasticsearch"}, - Location: "48.1333,11.5667", // lat,lon - Suggest: NewSuggestField(). - Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch"). - Output("Golang and Elasticsearch: An introduction."). - Weight(0), - } - tweet2 := tweet{ - User: "olivere", - Message: "Another unrelated topic.", - Tags: []string{"golang"}, - Location: "48.1189,11.4289", // lat,lon - Suggest: NewSuggestField(). - Input("Another unrelated topic.", "Golang topic."). - Output("About Golang."). - Weight(1), - } - tweet3 := tweet{ - User: "sandrae", - Message: "Cycling is fun.", - Tags: []string{"sports", "cycling"}, - Location: "47.7167,11.7167", // lat,lon - Suggest: NewSuggestField(). - Input("Cycling is fun."). - Output("Cycling is a fun sport."), - } - - // Add all documents - _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do() - if err != nil { - t.Fatal(err) - } - - _, err = client.Flush().Index(testIndexName).Do() - if err != nil { - t.Fatal(err) - } - - // Test _suggest endpoint - termSuggesterName := "my-term-suggester" - termSuggester := NewTermSuggester(termSuggesterName).Text("Goolang").Field("message") - phraseSuggesterName := "my-phrase-suggester" - phraseSuggester := NewPhraseSuggester(phraseSuggesterName).Text("Goolang").Field("message") - completionSuggesterName := "my-completion-suggester" - completionSuggester := NewCompletionSuggester(completionSuggesterName).Text("Go").Field("suggest_field") - - result, err := client.Suggest(). - Index(testIndexName). - Suggester(termSuggester). - Suggester(phraseSuggester). - Suggester(completionSuggester). 
- Do() - if err != nil { - t.Fatal(err) - } - if result == nil { - t.Errorf("expected result != nil; got nil") - } - if len(result) != 3 { - t.Errorf("expected 3 suggester results; got %d", len(result)) - } - - termSuggestions, found := result[termSuggesterName] - if !found { - t.Errorf("expected to find Suggest[%s]; got false", termSuggesterName) - } - if termSuggestions == nil { - t.Errorf("expected Suggest[%s] != nil; got nil", termSuggesterName) - } - if len(termSuggestions) != 1 { - t.Errorf("expected 1 suggestion; got %d", len(termSuggestions)) - } - - phraseSuggestions, found := result[phraseSuggesterName] - if !found { - t.Errorf("expected to find Suggest[%s]; got false", phraseSuggesterName) - } - if phraseSuggestions == nil { - t.Errorf("expected Suggest[%s] != nil; got nil", phraseSuggesterName) - } - if len(phraseSuggestions) != 1 { - t.Errorf("expected 1 suggestion; got %d", len(phraseSuggestions)) - } - - completionSuggestions, found := result[completionSuggesterName] - if !found { - t.Errorf("expected to find Suggest[%s]; got false", completionSuggesterName) - } - if completionSuggestions == nil { - t.Errorf("expected Suggest[%s] != nil; got nil", completionSuggesterName) - } - if len(completionSuggestions) != 1 { - t.Errorf("expected 1 suggestion; got %d", len(completionSuggestions)) - } - if len(completionSuggestions[0].Options) != 2 { - t.Errorf("expected 2 suggestion options; got %d", len(completionSuggestions[0].Options)) - } - if completionSuggestions[0].Options[0].Text != "About Golang." { - t.Errorf("expected Suggest[%s][0].Options[0].Text == %q; got %q", completionSuggesterName, "About Golang.", completionSuggestions[0].Options[0].Text) - } - if completionSuggestions[0].Options[1].Text != "Golang and Elasticsearch: An introduction." { - t.Errorf("expected Suggest[%s][0].Options[1].Text == %q; got %q", completionSuggesterName, "Golang and Elasticsearch: An introduction.", completionSuggestions[0].Options[1].Text) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester.go deleted file mode 100644 index c342b10d3..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// Represents the generic suggester interface. -// A suggester's only purpose is to return the -// source of the query as a JSON-serializable -// object. Returning a map[string]interface{} -// will do. -type Suggester interface { - Name() string - Source(includeName bool) (interface{}, error) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion.go deleted file mode 100644 index e0f5a3861..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CompletionSuggester is a fast suggester for e.g. type-ahead completion. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html -// for more details. 
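The Do method of SuggestService above sidesteps the _shards entry by first decoding the body into json.RawMessage values and then unmarshaling each suggester's result on its own. A standalone sketch of that two-phase decode; the response body, suggester name, and trimmed-down suggestion type are fabricated for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

// suggestion carries only the fields needed for the illustration.
type suggestion struct {
	Text   string `json:"text"`
	Offset int    `json:"offset"`
	Length int    `json:"length"`
}

func main() {
	body := []byte(`{"_shards":{"total":5},"my-suggester":[{"text":"Go","offset":0,"length":2}]}`)
	var raw map[string]*json.RawMessage
	if err := json.Unmarshal(body, &raw); err != nil {
		panic(err)
	}
	for name, msg := range raw {
		// _shards has a different shape than suggester results, so skip it.
		if name == "_shards" {
			continue
		}
		var s []suggestion
		if err := json.Unmarshal(*msg, &s); err != nil {
			panic(err)
		}
		fmt.Printf("%s: %+v\n", name, s)
	}
}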
-type CompletionSuggester struct {
-	Suggester
-	name           string
-	text           string
-	field          string
-	analyzer       string
-	size           *int
-	shardSize      *int
-	contextQueries []SuggesterContextQuery
-}
-
-// Creates a new completion suggester.
-func NewCompletionSuggester(name string) *CompletionSuggester {
-	return &CompletionSuggester{
-		name:           name,
-		contextQueries: make([]SuggesterContextQuery, 0),
-	}
-}
-
-func (q *CompletionSuggester) Name() string {
-	return q.name
-}
-
-func (q *CompletionSuggester) Text(text string) *CompletionSuggester {
-	q.text = text
-	return q
-}
-
-func (q *CompletionSuggester) Field(field string) *CompletionSuggester {
-	q.field = field
-	return q
-}
-
-func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester {
-	q.analyzer = analyzer
-	return q
-}
-
-func (q *CompletionSuggester) Size(size int) *CompletionSuggester {
-	q.size = &size
-	return q
-}
-
-func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester {
-	q.shardSize = &shardSize
-	return q
-}
-
-func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester {
-	q.contextQueries = append(q.contextQueries, query)
-	return q
-}
-
-func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester {
-	q.contextQueries = append(q.contextQueries, queries...)
-	return q
-}
-
-// completionSuggesterRequest is necessary because the order in which
-// the JSON elements are routed to Elasticsearch is relevant.
-// We got into trouble when using plain maps because the text element
-// needs to go before the completion element.
-type completionSuggesterRequest struct {
-	Text       string      `json:"text"`
-	Completion interface{} `json:"completion"`
-}
-
-// Creates the source for the completion suggester.
-func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
-	cs := &completionSuggesterRequest{}
-
-	if q.text != "" {
-		cs.Text = q.text
-	}
-
-	suggester := make(map[string]interface{})
-	cs.Completion = suggester
-
-	if q.analyzer != "" {
-		suggester["analyzer"] = q.analyzer
-	}
-	if q.field != "" {
-		suggester["field"] = q.field
-	}
-	if q.size != nil {
-		suggester["size"] = *q.size
-	}
-	if q.shardSize != nil {
-		suggester["shard_size"] = *q.shardSize
-	}
-	switch len(q.contextQueries) {
-	case 0:
-	case 1:
-		src, err := q.contextQueries[0].Source()
-		if err != nil {
-			return nil, err
-		}
-		suggester["context"] = src
-	default:
-		ctxq := make([]interface{}, 0)
-		for _, query := range q.contextQueries {
-			src, err := query.Source()
-			if err != nil {
-				return nil, err
-			}
-			ctxq = append(ctxq, src)
-		}
-		suggester["context"] = ctxq
-	}
-
-	// TODO(oe) Add completion-suggester specific parameters here
-
-	if !includeName {
-		return cs, nil
-	}
-
-	source := make(map[string]interface{})
-	source[q.name] = cs
-	return source, nil
-}
diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy.go
deleted file mode 100644
index 1c4455a61..000000000
--- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// FuzzyCompletionSuggester is a CompletionSuggester that allows fuzzy
-// completion.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html -// for details, and -// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html#fuzzy -// for details about the fuzzy completion suggester. -type FuzzyCompletionSuggester struct { - Suggester - name string - text string - field string - analyzer string - size *int - shardSize *int - contextQueries []SuggesterContextQuery - - fuzziness interface{} - fuzzyTranspositions *bool - fuzzyMinLength *int - fuzzyPrefixLength *int - unicodeAware *bool -} - -// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester. -type Fuzziness struct { -} - -// Creates a new completion suggester. -func NewFuzzyCompletionSuggester(name string) *FuzzyCompletionSuggester { - return &FuzzyCompletionSuggester{ - name: name, - contextQueries: make([]SuggesterContextQuery, 0), - } -} - -func (q *FuzzyCompletionSuggester) Name() string { - return q.name -} - -func (q *FuzzyCompletionSuggester) Text(text string) *FuzzyCompletionSuggester { - q.text = text - return q -} - -func (q *FuzzyCompletionSuggester) Field(field string) *FuzzyCompletionSuggester { - q.field = field - return q -} - -func (q *FuzzyCompletionSuggester) Analyzer(analyzer string) *FuzzyCompletionSuggester { - q.analyzer = analyzer - return q -} - -func (q *FuzzyCompletionSuggester) Size(size int) *FuzzyCompletionSuggester { - q.size = &size - return q -} - -func (q *FuzzyCompletionSuggester) ShardSize(shardSize int) *FuzzyCompletionSuggester { - q.shardSize = &shardSize - return q -} - -func (q *FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) *FuzzyCompletionSuggester { - q.contextQueries = append(q.contextQueries, query) - return q -} - -func (q *FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *FuzzyCompletionSuggester { - q.contextQueries = append(q.contextQueries, queries...) - return q -} - -// Fuzziness defines the strategy used to describe what "fuzzy" actually -// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO". -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#fuzziness -// for a detailed description. -func (q *FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) *FuzzyCompletionSuggester { - q.fuzziness = fuzziness - return q -} - -func (q *FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) *FuzzyCompletionSuggester { - q.fuzzyTranspositions = &fuzzyTranspositions - return q -} - -func (q *FuzzyCompletionSuggester) FuzzyMinLength(minLength int) *FuzzyCompletionSuggester { - q.fuzzyMinLength = &minLength - return q -} - -func (q *FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) *FuzzyCompletionSuggester { - q.fuzzyPrefixLength = &prefixLength - return q -} - -func (q *FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggester { - q.unicodeAware = &unicodeAware - return q -} - -// Creates the source for the completion suggester. 
-func (q *FuzzyCompletionSuggester) Source(includeName bool) (interface{}, error) { - cs := &completionSuggesterRequest{} - - if q.text != "" { - cs.Text = q.text - } - - suggester := make(map[string]interface{}) - cs.Completion = suggester - - if q.analyzer != "" { - suggester["analyzer"] = q.analyzer - } - if q.field != "" { - suggester["field"] = q.field - } - if q.size != nil { - suggester["size"] = *q.size - } - if q.shardSize != nil { - suggester["shard_size"] = *q.shardSize - } - switch len(q.contextQueries) { - case 0: - case 1: - src, err := q.contextQueries[0].Source() - if err != nil { - return nil, err - } - suggester["context"] = src - default: - ctxq := make([]interface{}, 0) - for _, query := range q.contextQueries { - src, err := query.Source() - if err != nil { - return nil, err - } - ctxq = append(ctxq, src) - } - suggester["context"] = ctxq - } - - // Fuzzy Completion Suggester fields - fuzzy := make(map[string]interface{}) - suggester["fuzzy"] = fuzzy - if q.fuzziness != nil { - fuzzy["fuzziness"] = q.fuzziness - } - if q.fuzzyTranspositions != nil { - fuzzy["transpositions"] = *q.fuzzyTranspositions - } - if q.fuzzyMinLength != nil { - fuzzy["min_length"] = *q.fuzzyMinLength - } - if q.fuzzyPrefixLength != nil { - fuzzy["prefix_length"] = *q.fuzzyPrefixLength - } - if q.unicodeAware != nil { - fuzzy["unicode_aware"] = *q.unicodeAware - } - - if !includeName { - return cs, nil - } - - source := make(map[string]interface{}) - source[q.name] = cs - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy_test.go deleted file mode 100644 index 29fcba55f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_fuzzy_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestFuzzyCompletionSuggesterSource(t *testing.T) { - s := NewFuzzyCompletionSuggester("song-suggest"). - Text("n"). - Field("suggest"). - Fuzziness(2) - src, err := s.Source(true) - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestFuzzyCompletionSuggesterWithStringFuzzinessSource(t *testing.T) { - s := NewFuzzyCompletionSuggester("song-suggest"). - Text("n"). - Field("suggest"). 
- Fuzziness("1..4") - src, err := s.Source(true) - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":"1..4"}}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_test.go deleted file mode 100644 index 986d3da01..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_completion_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestCompletionSuggesterSource(t *testing.T) { - s := NewCompletionSuggester("song-suggest"). - Text("n"). - Field("suggest") - src, err := s.Source(true) - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context.go deleted file mode 100644 index 0903f2171..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SuggesterContextQuery is used to define context information within -// a suggestion request. -type SuggesterContextQuery interface { - Source() (interface{}, error) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category.go deleted file mode 100644 index 4b8e43f88..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// -- SuggesterCategoryMapping -- - -// SuggesterCategoryMapping provides a mapping for a category context in a suggester. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_mapping. -type SuggesterCategoryMapping struct { - name string - fieldName string - defaultValues []string -} - -// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping. -func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping { - return &SuggesterCategoryMapping{ - name: name, - defaultValues: make([]string, 0), - } -} - -func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping { - q.defaultValues = append(q.defaultValues, values...) 
- return q -} - -func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping { - q.fieldName = fieldName - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterCategoryMapping) Source() (interface{}, error) { - source := make(map[string]interface{}) - - x := make(map[string]interface{}) - source[q.name] = x - - x["type"] = "category" - - switch len(q.defaultValues) { - case 0: - x["default"] = q.defaultValues - case 1: - x["default"] = q.defaultValues[0] - default: - x["default"] = q.defaultValues - } - - if q.fieldName != "" { - x["path"] = q.fieldName - } - return source, nil -} - -// -- SuggesterCategoryQuery -- - -// SuggesterCategoryQuery provides querying a category context in a suggester. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_query. -type SuggesterCategoryQuery struct { - name string - values []string -} - -// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery. -func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery { - q := &SuggesterCategoryQuery{ - name: name, - values: make([]string, 0), - } - if len(values) > 0 { - q.values = append(q.values, values...) - } - return q -} - -func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery { - q.values = append(q.values, values...) - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterCategoryQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - - switch len(q.values) { - case 0: - source[q.name] = q.values - case 1: - source[q.name] = q.values[0] - default: - source[q.name] = q.values - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category_test.go deleted file mode 100644 index 7ca045801..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_category_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestSuggesterCategoryMapping(t *testing.T) { - q := NewSuggesterCategoryMapping("color").DefaultValues("red") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"color":{"default":"red","type":"category"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSuggesterCategoryMappingWithTwoDefaultValues(t *testing.T) { - q := NewSuggesterCategoryMapping("color").DefaultValues("red", "orange") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"color":{"default":["red","orange"],"type":"category"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSuggesterCategoryMappingWithFieldName(t *testing.T) { - q := NewSuggesterCategoryMapping("color"). - DefaultValues("red", "orange"). 
- FieldName("color_field") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"color":{"default":["red","orange"],"path":"color_field","type":"category"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSuggesterCategoryQuery(t *testing.T) { - q := NewSuggesterCategoryQuery("color", "red") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"color":"red"}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSuggesterCategoryQueryWithTwoValues(t *testing.T) { - q := NewSuggesterCategoryQuery("color", "red", "yellow") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"color":["red","yellow"]}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo.go deleted file mode 100644 index bde1a4067..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// -- SuggesterGeoMapping -- - -// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_mapping. -type SuggesterGeoMapping struct { - name string - defaultLocations []*GeoPoint - precision []string - neighbors *bool - fieldName string -} - -// NewSuggesterGeoMapping creates a new SuggesterGeoMapping. -func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping { - return &SuggesterGeoMapping{ - name: name, - defaultLocations: make([]*GeoPoint, 0), - precision: make([]string, 0), - } -} - -func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping { - q.defaultLocations = append(q.defaultLocations, locations...) - return q -} - -func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping { - q.precision = append(q.precision, precision...) - return q -} - -func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping { - q.neighbors = &neighbors - return q -} - -func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping { - q.fieldName = fieldName - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. 
-func (q *SuggesterGeoMapping) Source() (interface{}, error) { - source := make(map[string]interface{}) - - x := make(map[string]interface{}) - source[q.name] = x - - x["type"] = "geo" - - if len(q.precision) > 0 { - x["precision"] = q.precision - } - if q.neighbors != nil { - x["neighbors"] = *q.neighbors - } - - switch len(q.defaultLocations) { - case 0: - case 1: - x["default"] = q.defaultLocations[0].Source() - default: - arr := make([]interface{}, 0) - for _, p := range q.defaultLocations { - arr = append(arr, p.Source()) - } - x["default"] = arr - } - - if q.fieldName != "" { - x["path"] = q.fieldName - } - return source, nil -} - -// -- SuggesterGeoQuery -- - -// SuggesterGeoQuery provides querying a geolocation context in a suggester. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_query -type SuggesterGeoQuery struct { - name string - location *GeoPoint - precision []string -} - -// NewSuggesterGeoQuery creates a new SuggesterGeoQuery. -func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery { - return &SuggesterGeoQuery{ - name: name, - location: location, - precision: make([]string, 0), - } -} - -func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery { - q.precision = append(q.precision, precision...) - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterGeoQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - - if len(q.precision) == 0 { - if q.location != nil { - source[q.name] = q.location.Source() - } - } else { - x := make(map[string]interface{}) - source[q.name] = x - - if q.location != nil { - x["value"] = q.location.Source() - } - - switch len(q.precision) { - case 0: - case 1: - x["precision"] = q.precision[0] - default: - x["precision"] = q.precision - } - } - - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo_test.go deleted file mode 100644 index 331276dab..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_context_geo_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestSuggesterGeoMapping(t *testing.T) { - q := NewSuggesterGeoMapping("location"). - Precision("1km", "5m"). - Neighbors(true). - FieldName("pin"). 
- DefaultLocations(GeoPointFromLatLon(0.0, 0.0)) - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestSuggesterGeoQuery(t *testing.T) { - q := NewSuggesterGeoQuery("location", GeoPointFromLatLon(11.5, 62.71)).Precision("1km") - src, err := q.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"location":{"precision":"1km","value":{"lat":11.5,"lon":62.71}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase.go deleted file mode 100644 index 60c48d88b..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// For more details, see -// http://www.elasticsearch.org/guide/reference/api/search/phrase-suggest/ -type PhraseSuggester struct { - Suggester - name string - text string - field string - analyzer string - size *int - shardSize *int - contextQueries []SuggesterContextQuery - - // fields specific to a phrase suggester - maxErrors *float64 - separator *string - realWordErrorLikelihood *float64 - confidence *float64 - generators map[string][]CandidateGenerator - gramSize *int - smoothingModel SmoothingModel - forceUnigrams *bool - tokenLimit *int - preTag, postTag *string - collateQuery *string - collateFilter *string - collatePreference *string - collateParams map[string]interface{} - collatePrune *bool -} - -// Creates a new phrase suggester. -func NewPhraseSuggester(name string) *PhraseSuggester { - return &PhraseSuggester{ - name: name, - contextQueries: make([]SuggesterContextQuery, 0), - collateParams: make(map[string]interface{}), - } -} - -func (q *PhraseSuggester) Name() string { - return q.name -} - -func (q *PhraseSuggester) Text(text string) *PhraseSuggester { - q.text = text - return q -} - -func (q *PhraseSuggester) Field(field string) *PhraseSuggester { - q.field = field - return q -} - -func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester { - q.analyzer = analyzer - return q -} - -func (q *PhraseSuggester) Size(size int) *PhraseSuggester { - q.size = &size - return q -} - -func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester { - q.shardSize = &shardSize - return q -} - -func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester { - q.contextQueries = append(q.contextQueries, query) - return q -} - -func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester { - q.contextQueries = append(q.contextQueries, queries...) 
-	return q
-}
-
-func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester {
-	if gramSize >= 1 {
-		q.gramSize = &gramSize
-	}
-	return q
-}
-
-func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester {
-	q.maxErrors = &maxErrors
-	return q
-}
-
-func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester {
-	q.separator = &separator
-	return q
-}
-
-func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester {
-	q.realWordErrorLikelihood = &realWordErrorLikelihood
-	return q
-}
-
-func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester {
-	q.confidence = &confidence
-	return q
-}
-
-func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester {
-	if q.generators == nil {
-		q.generators = make(map[string][]CandidateGenerator)
-	}
-	typ := generator.Type()
-	if _, found := q.generators[typ]; !found {
-		q.generators[typ] = make([]CandidateGenerator, 0)
-	}
-	q.generators[typ] = append(q.generators[typ], generator)
-	return q
-}
-
-func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester {
-	for _, g := range generators {
-		q = q.CandidateGenerator(g)
-	}
-	return q
-}
-
-func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester {
-	q.generators = nil
-	return q
-}
-
-func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester {
-	q.forceUnigrams = &forceUnigrams
-	return q
-}
-
-func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester {
-	q.smoothingModel = smoothingModel
-	return q
-}
-
-func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester {
-	q.tokenLimit = &tokenLimit
-	return q
-}
-
-func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester {
-	q.preTag = &preTag
-	q.postTag = &postTag
-	return q
-}
-
-func (q *PhraseSuggester) CollateQuery(collateQuery string) *PhraseSuggester {
-	q.collateQuery = &collateQuery
-	return q
-}
-
-func (q *PhraseSuggester) CollateFilter(collateFilter string) *PhraseSuggester {
-	q.collateFilter = &collateFilter
-	return q
-}
-
-func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester {
-	q.collatePreference = &collatePreference
-	return q
-}
-
-func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester {
-	q.collateParams = collateParams
-	return q
-}
-
-func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester {
-	q.collatePrune = &collatePrune
-	return q
-}
-
-// phraseSuggesterRequest is necessary because the order in which
-// the JSON elements are routed to Elasticsearch is relevant.
-// We got into trouble when using plain maps because the text element
-// needs to go before the phrase element.
-type phraseSuggesterRequest struct {
-	Text   string      `json:"text"`
-	Phrase interface{} `json:"phrase"`
-}
-
-// Creates the source for the phrase suggester.
-func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) {
-	ps := &phraseSuggesterRequest{}
-
-	if q.text != "" {
-		ps.Text = q.text
-	}
-
-	suggester := make(map[string]interface{})
-	ps.Phrase = suggester
-
-	if q.analyzer != "" {
-		suggester["analyzer"] = q.analyzer
-	}
-	if q.field != "" {
-		suggester["field"] = q.field
-	}
-	if q.size != nil {
-		suggester["size"] = *q.size
-	}
-	if q.shardSize != nil {
-		suggester["shard_size"] = *q.shardSize
-	}
-	switch len(q.contextQueries) {
-	case 0:
-	case 1:
-		src, err := q.contextQueries[0].Source()
-		if err != nil {
-			return nil, err
-		}
-		suggester["context"] = src
-	default:
-		ctxq := make([]interface{}, 0)
-		for _, query := range q.contextQueries {
-			src, err := query.Source()
-			if err != nil {
-				return nil, err
-			}
-			ctxq = append(ctxq, src)
-		}
-		suggester["context"] = ctxq
-	}
-
-	// Phrase-specific parameters
-	if q.realWordErrorLikelihood != nil {
-		suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood
-	}
-	if q.confidence != nil {
-		suggester["confidence"] = *q.confidence
-	}
-	if q.separator != nil {
-		suggester["separator"] = *q.separator
-	}
-	if q.maxErrors != nil {
-		suggester["max_errors"] = *q.maxErrors
-	}
-	if q.gramSize != nil {
-		suggester["gram_size"] = *q.gramSize
-	}
-	if q.forceUnigrams != nil {
-		suggester["force_unigrams"] = *q.forceUnigrams
-	}
-	if q.tokenLimit != nil {
-		suggester["token_limit"] = *q.tokenLimit
-	}
-	if q.generators != nil && len(q.generators) > 0 {
-		for typ, generators := range q.generators {
-			arr := make([]interface{}, 0)
-			for _, g := range generators {
-				src, err := g.Source()
-				if err != nil {
-					return nil, err
-				}
-				arr = append(arr, src)
-			}
-			suggester[typ] = arr
-		}
-	}
-	if q.smoothingModel != nil {
-		src, err := q.smoothingModel.Source()
-		if err != nil {
-			return nil, err
-		}
-		x := make(map[string]interface{})
-		x[q.smoothingModel.Type()] = src
-		suggester["smoothing"] = x
-	}
-	if q.preTag != nil {
-		hl := make(map[string]string)
-		hl["pre_tag"] = *q.preTag
-		if q.postTag != nil {
-			hl["post_tag"] = *q.postTag
-		}
-		suggester["highlight"] = hl
-	}
-	if q.collateQuery != nil || q.collateFilter != nil {
-		collate := make(map[string]interface{})
-		suggester["collate"] = collate
-		if q.collateQuery != nil {
-			collate["query"] = *q.collateQuery
-		}
-		if q.collateFilter != nil {
-			collate["filter"] = *q.collateFilter
-		}
-		if q.collatePreference != nil {
-			collate["preference"] = *q.collatePreference
-		}
-		if len(q.collateParams) > 0 {
-			collate["params"] = q.collateParams
-		}
-		if q.collatePrune != nil {
-			collate["prune"] = *q.collatePrune
-		}
-	}
-
-	if !includeName {
-		return ps, nil
-	}
-
-	source := make(map[string]interface{})
-	source[q.name] = ps
-	return source, nil
-}
-
-// -- Smoothing models --
-
-type SmoothingModel interface {
-	Type() string
-	Source() (interface{}, error)
-}
-
-// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
-// for details about smoothing models.
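Each of the models below plugs into the phrase suggester the same way: PhraseSuggester.Source above nests the model's parameters under its Type() key inside the smoothing element. A standalone sketch, with the Laplace model and alpha value chosen purely for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors x[q.smoothingModel.Type()] = src followed by
	// suggester["smoothing"] = x in PhraseSuggester.Source above.
	smoothing := map[string]interface{}{
		"laplace": map[string]interface{}{"alpha": 0.63},
	}
	b, err := json.Marshal(map[string]interface{}{"smoothing": smoothing})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"smoothing":{"laplace":{"alpha":0.63}}}
}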
-type StupidBackoffSmoothingModel struct { - discount float64 -} - -func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel { - return &StupidBackoffSmoothingModel{ - discount: discount, - } -} - -func (sm *StupidBackoffSmoothingModel) Type() string { - return "stupid_backoff" -} - -func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) { - source := make(map[string]interface{}) - source["discount"] = sm.discount - return source, nil -} - -// -- - -// LaplaceSmoothingModel implements a laplace smoothing model. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models -// for details about smoothing models. -type LaplaceSmoothingModel struct { - alpha float64 -} - -func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel { - return &LaplaceSmoothingModel{ - alpha: alpha, - } -} - -func (sm *LaplaceSmoothingModel) Type() string { - return "laplace" -} - -func (sm *LaplaceSmoothingModel) Source() (interface{}, error) { - source := make(map[string]interface{}) - source["alpha"] = sm.alpha - return source, nil -} - -// -- - -// LinearInterpolationSmoothingModel implements a linear interpolation -// smoothing model. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models -// for details about smoothing models. -type LinearInterpolationSmoothingModel struct { - trigramLamda float64 - bigramLambda float64 - unigramLambda float64 -} - -func NewLinearInterpolationSmoothingModel(trigramLamda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel { - return &LinearInterpolationSmoothingModel{ - trigramLamda: trigramLamda, - bigramLambda: bigramLambda, - unigramLambda: unigramLambda, - } -} - -func (sm *LinearInterpolationSmoothingModel) Type() string { - return "linear_interpolation" -} - -func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) { - source := make(map[string]interface{}) - source["trigram_lambda"] = sm.trigramLamda - source["bigram_lambda"] = sm.bigramLambda - source["unigram_lambda"] = sm.unigramLambda - return source, nil -} - -// -- CandidateGenerator -- - -type CandidateGenerator interface { - Type() string - Source() (interface{}, error) -} - -// DirectCandidateGenerator implements a direct candidate generator. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models -// for details about smoothing models. 
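Candidate generators follow a parallel wiring: PhraseSuggester.Source above groups them by Type() and emits each group as a JSON array under that key. A standalone sketch of the resulting shape; both generator bodies are illustrative.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors suggester[typ] = arr in PhraseSuggester.Source above;
	// field names and filter values are illustrative only.
	gens := []interface{}{
		map[string]interface{}{"field": "body", "suggest_mode": "always"},
		map[string]interface{}{"field": "reverse", "pre_filter": "reverse", "post_filter": "reverse"},
	}
	b, err := json.Marshal(map[string]interface{}{"direct_generator": gens})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}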
-type DirectCandidateGenerator struct { - field string - preFilter *string - postFilter *string - suggestMode *string - accuracy *float64 - size *int - sort *string - stringDistance *string - maxEdits *int - maxInspections *int - maxTermFreq *float64 - prefixLength *int - minWordLength *int - minDocFreq *float64 -} - -func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator { - return &DirectCandidateGenerator{ - field: field, - } -} - -func (g *DirectCandidateGenerator) Type() string { - return "direct_generator" -} - -func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator { - g.field = field - return g -} - -func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator { - g.preFilter = &preFilter - return g -} - -func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator { - g.postFilter = &postFilter - return g -} - -func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator { - g.suggestMode = &suggestMode - return g -} - -func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator { - g.accuracy = &accuracy - return g -} - -func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator { - g.size = &size - return g -} - -func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator { - g.sort = &sort - return g -} - -func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator { - g.stringDistance = &stringDistance - return g -} - -func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator { - g.maxEdits = &maxEdits - return g -} - -func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator { - g.maxInspections = &maxInspections - return g -} - -func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator { - g.maxTermFreq = &maxTermFreq - return g -} - -func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator { - g.prefixLength = &prefixLength - return g -} - -func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator { - g.minWordLength = &minWordLength - return g -} - -func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator { - g.minDocFreq = &minDocFreq - return g -} - -func (g *DirectCandidateGenerator) Source() (interface{}, error) { - source := make(map[string]interface{}) - if g.field != "" { - source["field"] = g.field - } - if g.suggestMode != nil { - source["suggest_mode"] = *g.suggestMode - } - if g.accuracy != nil { - source["accuracy"] = *g.accuracy - } - if g.size != nil { - source["size"] = *g.size - } - if g.sort != nil { - source["sort"] = *g.sort - } - if g.stringDistance != nil { - source["string_distance"] = *g.stringDistance - } - if g.maxEdits != nil { - source["max_edits"] = *g.maxEdits - } - if g.maxInspections != nil { - source["max_inspections"] = *g.maxInspections - } - if g.maxTermFreq != nil { - source["max_term_freq"] = *g.maxTermFreq - } - if g.prefixLength != nil { - source["prefix_length"] = *g.prefixLength - } - if g.minWordLength != nil { - source["min_word_length"] = *g.minWordLength - } - if g.minDocFreq != nil { - source["min_doc_freq"] = *g.minDocFreq - } - if g.preFilter != nil { - source["pre_filter"] = *g.preFilter - } - if g.postFilter != nil { - source["post_filter"] = *g.postFilter - } - return source, nil -} diff 
--git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase_test.go deleted file mode 100644 index 1eb46ce44..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_phrase_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestPhraseSuggesterSource(t *testing.T) { - s := NewPhraseSuggester("name"). - Text("Xor the Got-Jewel"). - Analyzer("body"). - Field("bigram"). - Size(1). - RealWordErrorLikelihood(0.95). - MaxErrors(0.5). - GramSize(2). - Highlight("", "") - src, err := s.Source(true) - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPhraseSuggesterSourceWithContextQuery(t *testing.T) { - geomapQ := NewSuggesterGeoMapping("location"). - Precision("1km", "5m"). - Neighbors(true). - FieldName("pin"). - DefaultLocations(GeoPointFromLatLon(0.0, 0.0)) - - s := NewPhraseSuggester("name"). - Text("Xor the Got-Jewel"). - Analyzer("body"). - Field("bigram"). - Size(1). - RealWordErrorLikelihood(0.95). - MaxErrors(0.5). - GramSize(2). - Highlight("", ""). - ContextQuery(geomapQ) - src, err := s.Source(true) - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","context":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPhraseSuggesterComplexSource(t *testing.T) { - g1 := NewDirectCandidateGenerator("body"). - SuggestMode("always"). - MinWordLength(1) - - g2 := NewDirectCandidateGenerator("reverse"). - SuggestMode("always"). - MinWordLength(1). - PreFilter("reverse"). - PostFilter("reverse") - - s := NewPhraseSuggester("simple_phrase"). - Text("Xor the Got-Jewel"). - Analyzer("body"). - Field("bigram"). - Size(4). - RealWordErrorLikelihood(0.95). - Confidence(2.0). - GramSize(2). - CandidateGenerators(g1, g2). - CollateQuery(`"match":{"{{field_name}}" : "{{suggestion}}"}`). - CollateParams(map[string]interface{}{"field_name": "title"}). - CollatePreference("_primary"). 
- CollatePrune(true) - src, err := s.Source(true) - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"simple_phrase":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","collate":{"params":{"field_name":"title"},"preference":"_primary","prune":true,"query":"\"match\":{\"{{field_name}}\" : \"{{suggestion}}\"}"},"confidence":2,"direct_generator":[{"field":"body","min_word_length":1,"suggest_mode":"always"},{"field":"reverse","min_word_length":1,"post_filter":"reverse","pre_filter":"reverse","suggest_mode":"always"}],"field":"bigram","gram_size":2,"real_word_error_likelihood":0.95,"size":4}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} - -func TestPhraseStupidBackoffSmoothingModel(t *testing.T) { - s := NewStupidBackoffSmoothingModel(0.42) - src, err := s.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - // The source does NOT include the smoothing model type! - expected := `{"discount":0.42}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } - if s.Type() != "stupid_backoff" { - t.Errorf("expected %q, got: %q", "stupid_backoff", s.Type()) - } -} - -func TestPhraseLaplaceSmoothingModel(t *testing.T) { - s := NewLaplaceSmoothingModel(0.63) - src, err := s.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - // The source does NOT include the smoothing model type! - expected := `{"alpha":0.63}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } - if s.Type() != "laplace" { - t.Errorf("expected %q, got: %q", "laplace", s.Type()) - } -} - -func TestLinearInterpolationSmoothingModel(t *testing.T) { - s := NewLinearInterpolationSmoothingModel(0.3, 0.2, 0.05) - src, err := s.Source() - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - // The source does NOT include the smoothing model type! - expected := `{"bigram_lambda":0.2,"trigram_lambda":0.3,"unigram_lambda":0.05}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } - if s.Type() != "linear_interpolation" { - t.Errorf("expected %q, got: %q", "linear_interpolation", s.Type()) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term.go deleted file mode 100644 index 116af405a..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
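[Editor's sketch] The direct generator and phrase suggester removed above are pure builder plumbing; the tests pin down the exact JSON they emit. As a rough sketch of how a caller would assemble the same request outside the test harness (assuming the library's published import path, gopkg.in/olivere/elastic.v3, since the vendored copy under github.com/olivere/elastic is what is being deleted; index and field names are the illustrative ones from TestPhraseSuggesterComplexSource):

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	// Two candidate generators, as in the complex-source test: one over the
	// plain field, one over a reversed field with pre/post filters.
	direct := elastic.NewDirectCandidateGenerator("body").
		SuggestMode("always").
		MinWordLength(1)
	reverse := elastic.NewDirectCandidateGenerator("reverse").
		SuggestMode("always").
		MinWordLength(1).
		PreFilter("reverse").
		PostFilter("reverse")

	s := elastic.NewPhraseSuggester("simple_phrase").
		Text("Xor the Got-Jewel").
		Field("bigram").
		Size(4).
		RealWordErrorLikelihood(0.95).
		GramSize(2).
		CandidateGenerators(direct, reverse)

	// Source(true) nests the body under the suggester's name, which is why
	// every expected string in the tests starts with the name key.
	src, err := s.Source(true)
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
}

Nesting under the name via Source(true) is what lets several suggesters share one suggest section of a search request.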
- -package elastic - -// For more details, see -// http://www.elasticsearch.org/guide/reference/api/search/term-suggest/ -type TermSuggester struct { - Suggester - name string - text string - field string - analyzer string - size *int - shardSize *int - contextQueries []SuggesterContextQuery - - // fields specific to term suggester - suggestMode string - accuracy *float64 - sort string - stringDistance string - maxEdits *int - maxInspections *int - maxTermFreq *float64 - prefixLength *int - minWordLength *int - minDocFreq *float64 -} - -// Creates a new term suggester. -func NewTermSuggester(name string) *TermSuggester { - return &TermSuggester{ - name: name, - contextQueries: make([]SuggesterContextQuery, 0), - } -} - -func (q *TermSuggester) Name() string { - return q.name -} - -func (q *TermSuggester) Text(text string) *TermSuggester { - q.text = text - return q -} - -func (q *TermSuggester) Field(field string) *TermSuggester { - q.field = field - return q -} - -func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester { - q.analyzer = analyzer - return q -} - -func (q *TermSuggester) Size(size int) *TermSuggester { - q.size = &size - return q -} - -func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester { - q.shardSize = &shardSize - return q -} - -func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester { - q.contextQueries = append(q.contextQueries, query) - return q -} - -func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester { - q.contextQueries = append(q.contextQueries, queries...) - return q -} - -func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester { - q.suggestMode = suggestMode - return q -} - -func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester { - q.accuracy = &accuracy - return q -} - -func (q *TermSuggester) Sort(sort string) *TermSuggester { - q.sort = sort - return q -} - -func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester { - q.stringDistance = stringDistance - return q -} - -func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester { - q.maxEdits = &maxEdits - return q -} - -func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester { - q.maxInspections = &maxInspections - return q -} - -func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester { - q.maxTermFreq = &maxTermFreq - return q -} - -func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester { - q.prefixLength = &prefixLength - return q -} - -func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester { - q.minWordLength = &minWordLength - return q -} - -func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester { - q.minDocFreq = &minDocFreq - return q -} - -// termSuggesterRequest is necessary because the order in which -// the JSON elements are routed to Elasticsearch is relevant. -// We got into trouble when using plain maps because the text element -// needs to go before the term element. -type termSuggesterRequest struct { - Text string `json:"text"` - Term interface{} `json:"term"` -} - -// Creates the source for the term suggester. 
-func (q *TermSuggester) Source(includeName bool) (interface{}, error) { - // "suggest" : { - // "my-suggest-1" : { - // "text" : "the amsterdma meetpu", - // "term" : { - // "field" : "body" - // } - // }, - // "my-suggest-2" : { - // "text" : "the rottredam meetpu", - // "term" : { - // "field" : "title", - // } - // } - // } - ts := &termSuggesterRequest{} - if q.text != "" { - ts.Text = q.text - } - - suggester := make(map[string]interface{}) - ts.Term = suggester - - if q.analyzer != "" { - suggester["analyzer"] = q.analyzer - } - if q.field != "" { - suggester["field"] = q.field - } - if q.size != nil { - suggester["size"] = *q.size - } - if q.shardSize != nil { - suggester["shard_size"] = *q.shardSize - } - switch len(q.contextQueries) { - case 0: - case 1: - src, err := q.contextQueries[0].Source() - if err != nil { - return nil, err - } - suggester["context"] = src - default: - ctxq := make([]interface{}, 0) - for _, query := range q.contextQueries { - src, err := query.Source() - if err != nil { - return nil, err - } - ctxq = append(ctxq, src) - } - suggester["context"] = ctxq - } - - // Specific to term suggester - if q.suggestMode != "" { - suggester["suggest_mode"] = q.suggestMode - } - if q.accuracy != nil { - suggester["accuracy"] = *q.accuracy - } - if q.sort != "" { - suggester["sort"] = q.sort - } - if q.stringDistance != "" { - suggester["string_distance"] = q.stringDistance - } - if q.maxEdits != nil { - suggester["max_edits"] = *q.maxEdits - } - if q.maxInspections != nil { - suggester["max_inspections"] = *q.maxInspections - } - if q.maxTermFreq != nil { - suggester["max_term_freq"] = *q.maxTermFreq - } - if q.prefixLength != nil { - suggester["prefix_len"] = *q.prefixLength - } - if q.minWordLength != nil { - suggester["min_word_len"] = *q.minWordLength - } - if q.minDocFreq != nil { - suggester["min_doc_freq"] = *q.minDocFreq - } - - if !includeName { - return ts, nil - } - - source := make(map[string]interface{}) - source[q.name] = ts - return source, nil -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term_test.go deleted file mode 100644 index 869049890..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/suggester_term_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "testing" -) - -func TestTermSuggesterSource(t *testing.T) { - s := NewTermSuggester("name"). - Text("n"). - Field("suggest") - src, err := s.Source(true) - if err != nil { - t.Fatal(err) - } - data, err := json.Marshal(src) - if err != nil { - t.Fatalf("marshaling to JSON failed: %v", err) - } - got := string(data) - expected := `{"name":{"text":"n","term":{"field":"suggest"}}}` - if got != expected { - t.Errorf("expected\n%s\n,got:\n%s", expected, got) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/termvectors.go b/services/templeton/vendor/src/github.com/olivere/elastic/termvectors.go deleted file mode 100644 index 355108200..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/termvectors.go +++ /dev/null @@ -1,458 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
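[Editor's sketch] The term suggester removed above shares the same builder shape but serializes through termSuggesterRequest so that "text" always precedes "term" in the body. A minimal sketch under the same import-path assumption, extending TestTermSuggesterSource with two of the optional knobs:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	s := elastic.NewTermSuggester("name").
		Text("n").
		Field("suggest").
		SuggestMode("always").
		MaxEdits(2)
	src, err := s.Source(true)
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	// Prints {"name":{"text":"n","term":{"field":"suggest","max_edits":2,"suggest_mode":"always"}}};
	// pointer fields that were never set stay out of the JSON entirely.
	fmt.Println(string(data))
}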
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// TermvectorsService returns information and statistics on terms in the -// fields of a particular document. The document could be stored in the -// index or artificially provided by the user. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html -// for documentation. -type TermvectorsService struct { - client *Client - pretty bool - id string - index string - typ string - dfs *bool - doc interface{} - fieldStatistics *bool - fields []string - filter *TermvectorsFilterSettings - perFieldAnalyzer map[string]string - offsets *bool - parent string - payloads *bool - positions *bool - preference string - realtime *bool - routing string - termStatistics *bool - version interface{} - versionType string - bodyJson interface{} - bodyString string -} - -// NewTermvectorsService creates a new TermvectorsService. -func NewTermvectorsService(client *Client) *TermvectorsService { - return &TermvectorsService{ - client: client, - } -} - -// Index in which the document resides. -func (s *TermvectorsService) Index(index string) *TermvectorsService { - s.index = index - return s -} - -// Type of the document. -func (s *TermvectorsService) Type(typ string) *TermvectorsService { - s.typ = typ - return s -} - -// Id of the document. -func (s *TermvectorsService) Id(id string) *TermvectorsService { - s.id = id - return s -} - -// Dfs specifies if distributed frequencies should be returned instead -// shard frequencies. -func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService { - s.dfs = &dfs - return s -} - -// Doc is the document to analyze. -func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService { - s.doc = doc - return s -} - -// FieldStatistics specifies if document count, sum of document frequencies -// and sum of total term frequencies should be returned. -func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService { - s.fieldStatistics = &fieldStatistics - return s -} - -// Fields a list of fields to return. -func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService { - if s.fields == nil { - s.fields = make([]string, 0) - } - s.fields = append(s.fields, fields...) - return s -} - -// Filter adds terms filter settings. -func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService { - s.filter = filter - return s -} - -// PerFieldAnalyzer allows to specify a different analyzer than the one -// at the field. -func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService { - s.perFieldAnalyzer = perFieldAnalyzer - return s -} - -// Offsets specifies if term offsets should be returned. -func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService { - s.offsets = &offsets - return s -} - -// Parent id of documents. -func (s *TermvectorsService) Parent(parent string) *TermvectorsService { - s.parent = parent - return s -} - -// Payloads specifies if term payloads should be returned. -func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService { - s.payloads = &payloads - return s -} - -// Positions specifies if term positions should be returned. 
-func (s *TermvectorsService) Positions(positions bool) *TermvectorsService { - s.positions = &positions - return s -} - -// Preference specify the node or shard the operation -// should be performed on (default: random). -func (s *TermvectorsService) Preference(preference string) *TermvectorsService { - s.preference = preference - return s -} - -// Realtime specifies if request is real-time as opposed to -// near-real-time (default: true). -func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService { - s.realtime = &realtime - return s -} - -// Routing is a specific routing value. -func (s *TermvectorsService) Routing(routing string) *TermvectorsService { - s.routing = routing - return s -} - -// TermStatistics specifies if total term frequency and document frequency -// should be returned. -func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService { - s.termStatistics = &termStatistics - return s -} - -// Version an explicit version number for concurrency control. -func (s *TermvectorsService) Version(version interface{}) *TermvectorsService { - s.version = version - return s -} - -// VersionType specifies a version type ("internal", "external", "external_gte", or "force"). -func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService { - s.versionType = versionType - return s -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService { - s.pretty = pretty - return s -} - -// BodyJson defines the body parameters. See documentation. -func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService { - s.bodyJson = body - return s -} - -// BodyString defines the body parameters as a string. See documentation. -func (s *TermvectorsService) BodyString(body string) *TermvectorsService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
-func (s *TermvectorsService) buildURL() (string, url.Values, error) { - var pathParam = map[string]string{ - "index": s.index, - "type": s.typ, - } - var path string - var err error - - // Build URL - if s.id != "" { - pathParam["id"] = s.id - path, err = uritemplates.Expand("/{index}/{type}/{id}/_termvectors", pathParam) - } else { - path, err = uritemplates.Expand("/{index}/{type}/_termvectors", pathParam) - } - - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if s.pretty { - params.Set("pretty", "1") - } - if s.dfs != nil { - params.Set("dfs", fmt.Sprintf("%v", *s.dfs)) - } - if s.fieldStatistics != nil { - params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics)) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if s.offsets != nil { - params.Set("offsets", fmt.Sprintf("%v", *s.offsets)) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.payloads != nil { - params.Set("payloads", fmt.Sprintf("%v", *s.payloads)) - } - if s.positions != nil { - params.Set("positions", fmt.Sprintf("%v", *s.positions)) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.realtime != nil { - params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.termStatistics != nil { - params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics)) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *TermvectorsService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *TermvectorsService) Do() (*TermvectorsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else if s.bodyString != "" { - body = s.bodyString - } else { - data := make(map[string]interface{}) - if s.doc != nil { - data["doc"] = s.doc - } - if len(s.perFieldAnalyzer) > 0 { - data["per_field_analyzer"] = s.perFieldAnalyzer - } - if s.filter != nil { - src, err := s.filter.Source() - if err != nil { - return nil, err - } - data["filter"] = src - } - if len(data) > 0 { - body = data - } - } - - // Get HTTP response - res, err := s.client.PerformRequest("GET", path, params, body) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(TermvectorsResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Filter settings -- - -// TermvectorsFilterSettings adds additional filters to a Termsvector request. -// It allows to filter terms based on their tf-idf scores. -// See https://www.elastic.co/guide/en/elasticsearch/reference/2.1/docs-termvectors.html#_terms_filtering -// for more information. 
-type TermvectorsFilterSettings struct { - maxNumTerms *int64 - minTermFreq *int64 - maxTermFreq *int64 - minDocFreq *int64 - maxDocFreq *int64 - minWordLength *int64 - maxWordLength *int64 -} - -// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct. -func NewTermvectorsFilterSettings() *TermvectorsFilterSettings { - return &TermvectorsFilterSettings{} -} - -// MaxNumTerms specifies the maximum number of terms the must be returned per field. -func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings { - fs.maxNumTerms = &value - return fs -} - -// MinTermFreq ignores words with less than this frequency in the source doc. -func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings { - fs.minTermFreq = &value - return fs -} - -// MaxTermFreq ignores words with more than this frequency in the source doc. -func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings { - fs.maxTermFreq = &value - return fs -} - -// MinDocFreq ignores terms which do not occur in at least this many docs. -func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings { - fs.minDocFreq = &value - return fs -} - -// MaxDocFreq ignores terms which occur in more than this many docs. -func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings { - fs.maxDocFreq = &value - return fs -} - -// MinWordLength specifies the minimum word length below which words will be ignored. -func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings { - fs.minWordLength = &value - return fs -} - -// MaxWordLength specifies the maximum word length above which words will be ignored. -func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings { - fs.maxWordLength = &value - return fs -} - -// Source returns JSON for the query. -func (fs *TermvectorsFilterSettings) Source() (interface{}, error) { - source := make(map[string]interface{}) - if fs.maxNumTerms != nil { - source["max_num_terms"] = *fs.maxNumTerms - } - if fs.minTermFreq != nil { - source["min_term_freq"] = *fs.minTermFreq - } - if fs.maxTermFreq != nil { - source["max_term_freq"] = *fs.maxTermFreq - } - if fs.minDocFreq != nil { - source["min_doc_freq"] = *fs.minDocFreq - } - if fs.maxDocFreq != nil { - source["max_doc_freq"] = *fs.maxDocFreq - } - if fs.minWordLength != nil { - source["min_word_length"] = *fs.minWordLength - } - if fs.maxWordLength != nil { - source["max_word_length"] = *fs.maxWordLength - } - return source, nil -} - -// -- Response types -- - -type TokenInfo struct { - StartOffset int64 `json:"start_offset"` - EndOffset int64 `json:"end_offset"` - Position int64 `json:"position"` - Payload string `json:"payload"` -} - -type TermsInfo struct { - DocFreq int64 `json:"doc_freq"` - TermFreq int64 `json:"term_freq"` - Ttf int64 `json:"ttf"` - Tokens []TokenInfo `json:"tokens"` -} - -type FieldStatistics struct { - DocCount int64 `json:"doc_count"` - SumDocFreq int64 `json:"sum_doc_freq"` - SumTtf int64 `json:"sum_ttf"` -} - -type TermVectorsFieldInfo struct { - FieldStatistics FieldStatistics `json:"field_statistics"` - Terms map[string]TermsInfo `json:"terms"` -} - -// TermvectorsResponse is the response of TermvectorsService.Do. 
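[Editor's sketch] Putting the termvectors service and the filter settings above together: a sketch of the end-to-end call that the tests below exercise piecemeal, assuming a local Elasticsearch 2.x node and illustrative index, type, and field names:

package main

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		panic(err)
	}
	// Term vectors for a stored document; the filter settings trim the
	// result to reasonably frequent terms, per the deleted doc comments.
	res, err := client.TermVectors("twitter", "tweet").
		Id("1").
		Fields("message").
		FieldStatistics(true).
		TermStatistics(true).
		Filter(elastic.NewTermvectorsFilterSettings().MinTermFreq(1).MaxNumTerms(25)).
		Do()
	if err != nil {
		panic(err)
	}
	for field, info := range res.TermVectors {
		fmt.Printf("%s: %d terms, doc_count=%d\n",
			field, len(info.Terms), info.FieldStatistics.DocCount)
	}
}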
-type TermvectorsResponse struct { - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id,omitempty"` - Version int `json:"_version"` - Found bool `json:"found"` - Took int64 `json:"took"` - TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/termvectors_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/termvectors_test.go deleted file mode 100644 index e487a24a4..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/termvectors_test.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "testing" - "time" -) - -func TestTermVectorsBuildURL(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tests := []struct { - Index string - Type string - Id string - Expected string - }{ - { - "twitter", - "tweet", - "", - "/twitter/tweet/_termvectors", - }, - { - "twitter", - "tweet", - "1", - "/twitter/tweet/1/_termvectors", - }, - } - - for _, test := range tests { - builder := client.TermVectors(test.Index, test.Type) - if test.Id != "" { - builder = builder.Id(test.Id) - } - path, _, err := builder.buildURL() - if err != nil { - t.Fatal(err) - } - if path != test.Expected { - t.Errorf("expected %q; got: %q", test.Expected, path) - } - } -} - -func TestTermVectorsWithId(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."} - - // Add a document - indexResult, err := client.Index(). - Index(testIndexName). - Type("tweet"). - Id("1"). - BodyJson(&tweet1). - Refresh(true). - Do() - if err != nil { - t.Fatal(err) - } - if indexResult == nil { - t.Errorf("expected result to be != nil; got: %v", indexResult) - } - - // TermVectors by specifying ID - field := "Message" - result, err := client.TermVectors(testIndexName, "tweet"). - Id("1"). - Fields(field). - FieldStatistics(true). - TermStatistics(true). - Do() - if err != nil { - t.Fatal(err) - } - if result == nil { - t.Fatal("expected to return information and statistics") - } - if !result.Found { - t.Errorf("expected found to be %v; got: %v", true, result.Found) - } - if result.Took <= 0 { - t.Errorf("expected took in millis > 0; got: %v", result.Took) - } -} - -func TestTermVectorsWithDoc(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Travis lags sometimes - if isTravis() { - time.Sleep(2 * time.Second) - } - - // TermVectors by specifying Doc - var doc = map[string]interface{}{ - "fullname": "John Doe", - "text": "twitter test test test", - } - var perFieldAnalyzer = map[string]string{ - "fullname": "keyword", - } - - result, err := client.TermVectors(testIndexName, "tweet"). - Doc(doc). - PerFieldAnalyzer(perFieldAnalyzer). - FieldStatistics(true). - TermStatistics(true). 
- Do() - if err != nil { - t.Fatal(err) - } - if result == nil { - t.Fatal("expected to return information and statistics") - } - if !result.Found { - t.Errorf("expected found to be %v; got: %v", true, result.Found) - } - if result.Took <= 0 { - t.Errorf("expected took in millis > 0; got: %v", result.Took) - } -} - -func TestTermVectorsWithFilter(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - // Travis lags sometimes - if isTravis() { - time.Sleep(2 * time.Second) - } - - // TermVectors by specifying Doc - var doc = map[string]interface{}{ - "fullname": "John Doe", - "text": "twitter test test test", - } - var perFieldAnalyzer = map[string]string{ - "fullname": "keyword", - } - - result, err := client.TermVectors(testIndexName, "tweet"). - Doc(doc). - PerFieldAnalyzer(perFieldAnalyzer). - FieldStatistics(true). - TermStatistics(true). - Filter(NewTermvectorsFilterSettings().MinTermFreq(1)). - Do() - if err != nil { - t.Fatal(err) - } - if result == nil { - t.Fatal("expected to return information and statistics") - } - if !result.Found { - t.Errorf("expected found to be %v; got: %v", true, result.Found) - } - if result.Took <= 0 { - t.Errorf("expected took in millis > 0; got: %v", result.Took) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/update.go b/services/templeton/vendor/src/github.com/olivere/elastic/update.go deleted file mode 100644 index a20149b1c..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/update.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "gopkg.in/olivere/elastic.v3/uritemplates" -) - -// UpdateService updates a document in Elasticsearch. -// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-update.html -// for details. -type UpdateService struct { - client *Client - index string - typ string - id string - routing string - parent string - script *Script - fields []string - version *int64 - versionType string - retryOnConflict *int - refresh *bool - replicationType string - consistencyLevel string - upsert interface{} - scriptedUpsert *bool - docAsUpsert *bool - detectNoop *bool - doc interface{} - timeout string - pretty bool -} - -// NewUpdateService creates the service to update documents in Elasticsearch. -func NewUpdateService(client *Client) *UpdateService { - builder := &UpdateService{ - client: client, - fields: make([]string, 0), - } - return builder -} - -// Index is the name of the Elasticsearch index (required). -func (b *UpdateService) Index(name string) *UpdateService { - b.index = name - return b -} - -// Type is the type of the document (required). -func (b *UpdateService) Type(typ string) *UpdateService { - b.typ = typ - return b -} - -// Id is the identifier of the document to update (required). -func (b *UpdateService) Id(id string) *UpdateService { - b.id = id - return b -} - -// Routing specifies a specific routing value. -func (b *UpdateService) Routing(routing string) *UpdateService { - b.routing = routing - return b -} - -// Parent sets the id of the parent document. -func (b *UpdateService) Parent(parent string) *UpdateService { - b.parent = parent - return b -} - -// Script is the script definition. 
-func (b *UpdateService) Script(script *Script) *UpdateService { - b.script = script - return b -} - -// RetryOnConflict specifies how many times the operation should be retried -// when a conflict occurs (default: 0). -func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService { - b.retryOnConflict = &retryOnConflict - return b -} - -// Fields is a list of fields to return in the response. -func (b *UpdateService) Fields(fields ...string) *UpdateService { - b.fields = make([]string, 0, len(fields)) - b.fields = append(b.fields, fields...) - return b -} - -// Version defines the explicit version number for concurrency control. -func (b *UpdateService) Version(version int64) *UpdateService { - b.version = &version - return b -} - -// VersionType is one of "internal" or "force". -func (b *UpdateService) VersionType(versionType string) *UpdateService { - b.versionType = versionType - return b -} - -// Refresh the index after performing the update. -func (b *UpdateService) Refresh(refresh bool) *UpdateService { - b.refresh = &refresh - return b -} - -// ReplicationType is one of "sync" or "async". -func (b *UpdateService) ReplicationType(replicationType string) *UpdateService { - b.replicationType = replicationType - return b -} - -// ConsistencyLevel is one of "one", "quorum", or "all". -// It sets the write consistency setting for the update operation. -func (b *UpdateService) ConsistencyLevel(consistencyLevel string) *UpdateService { - b.consistencyLevel = consistencyLevel - return b -} - -// Doc allows for updating a partial document. -func (b *UpdateService) Doc(doc interface{}) *UpdateService { - b.doc = doc - return b -} - -// Upsert can be used to index the document when it doesn't exist yet. -// Use this e.g. to initialize a document with a default value. -func (b *UpdateService) Upsert(doc interface{}) *UpdateService { - b.upsert = doc - return b -} - -// DocAsUpsert can be used to insert the document if it doesn't already exist. -func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService { - b.docAsUpsert = &docAsUpsert - return b -} - -// DetectNoop will instruct Elasticsearch to check if changes will occur -// when updating via Doc. It there aren't any changes, the request will -// turn into a no-op. -func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService { - b.detectNoop = &detectNoop - return b -} - -// ScriptedUpsert should be set to true if the referenced script -// (defined in Script or ScriptId) should be called to perform an insert. -// The default is false. -func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService { - b.scriptedUpsert = &scriptedUpsert - return b -} - -// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms". -func (b *UpdateService) Timeout(timeout string) *UpdateService { - b.timeout = timeout - return b -} - -// Pretty instructs to return human readable, prettified JSON. -func (b *UpdateService) Pretty(pretty bool) *UpdateService { - b.pretty = pretty - return b -} - -// url returns the URL part of the document request. 
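[Editor's sketch] The update builder above maps nearly one-to-one onto the _update request body assembled below in body(). A short sketch of the doc-as-upsert path that the tests verify, under the same local-node assumption as the previous snippet:

package main

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		panic(err)
	}
	// Partial update that inserts the document if it is missing
	// (doc_as_upsert) and becomes a no-op when nothing would change
	// (detect_noop).
	res, err := client.Update().
		Index("test").Type("type1").Id("1").
		Doc(map[string]interface{}{"name": "new_name"}).
		DocAsUpsert(true).
		DetectNoop(true).
		Do()
	if err != nil {
		panic(err)
	}
	fmt.Printf("version=%d created=%v\n", res.Version, res.Created)
}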
-func (b *UpdateService) url() (string, url.Values, error) { - // Build url - path := "/{index}/{type}/{id}/_update" - path, err := uritemplates.Expand(path, map[string]string{ - "index": b.index, - "type": b.typ, - "id": b.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Parameters - params := make(url.Values) - if b.pretty { - params.Set("pretty", "true") - } - if b.routing != "" { - params.Set("routing", b.routing) - } - if b.parent != "" { - params.Set("parent", b.parent) - } - if b.timeout != "" { - params.Set("timeout", b.timeout) - } - if b.refresh != nil { - params.Set("refresh", fmt.Sprintf("%v", *b.refresh)) - } - if b.replicationType != "" { - params.Set("replication", b.replicationType) - } - if b.consistencyLevel != "" { - params.Set("consistency", b.consistencyLevel) - } - if len(b.fields) > 0 { - params.Set("fields", strings.Join(b.fields, ",")) - } - if b.version != nil { - params.Set("version", fmt.Sprintf("%d", *b.version)) - } - if b.versionType != "" { - params.Set("version_type", b.versionType) - } - if b.retryOnConflict != nil { - params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict)) - } - - return path, params, nil -} - -// body returns the body part of the document request. -func (b *UpdateService) body() (interface{}, error) { - source := make(map[string]interface{}) - - if b.script != nil { - src, err := b.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - - if b.scriptedUpsert != nil { - source["scripted_upsert"] = *b.scriptedUpsert - } - - if b.upsert != nil { - source["upsert"] = b.upsert - } - - if b.doc != nil { - source["doc"] = b.doc - } - if b.docAsUpsert != nil { - source["doc_as_upsert"] = *b.docAsUpsert - } - if b.detectNoop != nil { - source["detect_noop"] = *b.detectNoop - } - - return source, nil -} - -// Do executes the update operation. -func (b *UpdateService) Do() (*UpdateResponse, error) { - path, params, err := b.url() - if err != nil { - return nil, err - } - - // Get body of the request - body, err := b.body() - if err != nil { - return nil, err - } - - // Get response - res, err := b.client.PerformRequest("POST", path, params, body) - if err != nil { - return nil, err - } - - // Return result - ret := new(UpdateResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// UpdateResponse is the result of updating a document in Elasticsearch. -type UpdateResponse struct { - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id"` - Version int `json:"_version"` - Created bool `json:"created"` - GetResult *GetResult `json:"get"` -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/update_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/update_test.go deleted file mode 100644 index 57b26dc0e..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/update_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "net/url" - "testing" -) - -func TestUpdateViaScript(t *testing.T) { - client := setupTestClient(t) - update := client.Update(). - Index("test").Type("type1").Id("1"). 
- Script(NewScript("ctx._source.tags += tag").Params(map[string]interface{}{"tag": "blue"}).Lang("groovy")) - path, params, err := update.url() - if err != nil { - t.Fatalf("expected to return URL, got: %v", err) - } - expectedPath := `/test/type1/1/_update` - if expectedPath != path { - t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) - } - expectedParams := url.Values{} - if expectedParams.Encode() != params.Encode() { - t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) - } - body, err := update.body() - if err != nil { - t.Fatalf("expected to return body, got: %v", err) - } - data, err := json.Marshal(body) - if err != nil { - t.Fatalf("expected to marshal body as JSON, got: %v", err) - } - got := string(data) - expected := `{"script":{"inline":"ctx._source.tags += tag","lang":"groovy","params":{"tag":"blue"}}}` - if got != expected { - t.Errorf("expected\n%s\ngot:\n%s", expected, got) - } -} - -func TestUpdateViaScriptId(t *testing.T) { - client := setupTestClient(t) - - scriptParams := map[string]interface{}{ - "pageViewEvent": map[string]interface{}{ - "url": "foo.com/bar", - "response": 404, - "time": "2014-01-01 12:32", - }, - } - script := NewScriptId("my_web_session_summariser").Params(scriptParams) - - update := client.Update(). - Index("sessions").Type("session").Id("dh3sgudg8gsrgl"). - Script(script). - ScriptedUpsert(true). - Upsert(map[string]interface{}{}) - path, params, err := update.url() - if err != nil { - t.Fatalf("expected to return URL, got: %v", err) - } - expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update` - if expectedPath != path { - t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) - } - expectedParams := url.Values{} - if expectedParams.Encode() != params.Encode() { - t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) - } - body, err := update.body() - if err != nil { - t.Fatalf("expected to return body, got: %v", err) - } - data, err := json.Marshal(body) - if err != nil { - t.Fatalf("expected to marshal body as JSON, got: %v", err) - } - got := string(data) - expected := `{"script":{"id":"my_web_session_summariser","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}` - if got != expected { - t.Errorf("expected\n%s\ngot:\n%s", expected, got) - } -} - -func TestUpdateViaScriptFile(t *testing.T) { - client := setupTestClient(t) - - scriptParams := map[string]interface{}{ - "pageViewEvent": map[string]interface{}{ - "url": "foo.com/bar", - "response": 404, - "time": "2014-01-01 12:32", - }, - } - script := NewScriptFile("update_script").Params(scriptParams) - - update := client.Update(). - Index("sessions").Type("session").Id("dh3sgudg8gsrgl"). - Script(script). - ScriptedUpsert(true). 
- Upsert(map[string]interface{}{}) - - path, params, err := update.url() - if err != nil { - t.Fatalf("expected to return URL, got: %v", err) - } - expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update` - if expectedPath != path { - t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) - } - expectedParams := url.Values{} - if expectedParams.Encode() != params.Encode() { - t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) - } - body, err := update.body() - if err != nil { - t.Fatalf("expected to return body, got: %v", err) - } - data, err := json.Marshal(body) - if err != nil { - t.Fatalf("expected to marshal body as JSON, got: %v", err) - } - got := string(data) - expected := `{"script":{"file":"update_script","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}` - if got != expected { - t.Errorf("expected\n%s\ngot:\n%s", expected, got) - } -} - -func TestUpdateViaScriptAndUpsert(t *testing.T) { - client := setupTestClient(t) - update := client.Update(). - Index("test").Type("type1").Id("1"). - Script(NewScript("ctx._source.counter += count").Params(map[string]interface{}{"count": 4})). - Upsert(map[string]interface{}{"counter": 1}) - path, params, err := update.url() - if err != nil { - t.Fatalf("expected to return URL, got: %v", err) - } - expectedPath := `/test/type1/1/_update` - if expectedPath != path { - t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) - } - expectedParams := url.Values{} - if expectedParams.Encode() != params.Encode() { - t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) - } - body, err := update.body() - if err != nil { - t.Fatalf("expected to return body, got: %v", err) - } - data, err := json.Marshal(body) - if err != nil { - t.Fatalf("expected to marshal body as JSON, got: %v", err) - } - got := string(data) - expected := `{"script":{"inline":"ctx._source.counter += count","params":{"count":4}},"upsert":{"counter":1}}` - if got != expected { - t.Errorf("expected\n%s\ngot:\n%s", expected, got) - } -} - -func TestUpdateViaDoc(t *testing.T) { - client := setupTestClient(t) - update := client.Update(). - Index("test").Type("type1").Id("1"). - Doc(map[string]interface{}{"name": "new_name"}). - DetectNoop(true) - path, params, err := update.url() - if err != nil { - t.Fatalf("expected to return URL, got: %v", err) - } - expectedPath := `/test/type1/1/_update` - if expectedPath != path { - t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) - } - expectedParams := url.Values{} - if expectedParams.Encode() != params.Encode() { - t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) - } - body, err := update.body() - if err != nil { - t.Fatalf("expected to return body, got: %v", err) - } - data, err := json.Marshal(body) - if err != nil { - t.Fatalf("expected to marshal body as JSON, got: %v", err) - } - got := string(data) - expected := `{"detect_noop":true,"doc":{"name":"new_name"}}` - if got != expected { - t.Errorf("expected\n%s\ngot:\n%s", expected, got) - } -} - -func TestUpdateViaDocAndUpsert(t *testing.T) { - client := setupTestClient(t) - update := client.Update(). - Index("test").Type("type1").Id("1"). - Doc(map[string]interface{}{"name": "new_name"}). - DocAsUpsert(true). - Timeout("1s"). 
- Refresh(true) - path, params, err := update.url() - if err != nil { - t.Fatalf("expected to return URL, got: %v", err) - } - expectedPath := `/test/type1/1/_update` - if expectedPath != path { - t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path) - } - expectedParams := url.Values{"refresh": []string{"true"}, "timeout": []string{"1s"}} - if expectedParams.Encode() != params.Encode() { - t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode()) - } - body, err := update.body() - if err != nil { - t.Fatalf("expected to return body, got: %v", err) - } - data, err := json.Marshal(body) - if err != nil { - t.Fatalf("expected to marshal body as JSON, got: %v", err) - } - got := string(data) - expected := `{"doc":{"name":"new_name"},"doc_as_upsert":true}` - if got != expected { - t.Errorf("expected\n%s\ngot:\n%s", expected, got) - } -} - -func TestUpdateViaScriptIntegration(t *testing.T) { - client := setupTestClientAndCreateIndex(t) - - esversion, err := client.ElasticsearchVersion(DefaultURL) - if err != nil { - t.Fatal(err) - } - if esversion >= "1.4.3" || (esversion < "1.4.0" && esversion >= "1.3.8") { - t.Skip("groovy scripting has been disabled as for [1.3.8,1.4.0) and 1.4.3+") - return - } - - tweet1 := tweet{User: "olivere", Retweets: 10, Message: "Welcome to Golang and Elasticsearch."} - - // Add a document - indexResult, err := client.Index(). - Index(testIndexName). - Type("tweet"). - Id("1"). - BodyJson(&tweet1). - Do() - if err != nil { - t.Fatal(err) - } - if indexResult == nil { - t.Errorf("expected result to be != nil; got: %v", indexResult) - } - - // Update number of retweets - increment := 1 - script := NewScript("ctx._source.retweets += num"). - Params(map[string]interface{}{"num": increment}). - Lang("groovy") // Use "groovy" as default language as 1.3 uses MVEL by default - update, err := client.Update().Index(testIndexName).Type("tweet").Id("1"). - Script(script). - Do() - if err != nil { - t.Fatal(err) - } - if update == nil { - t.Errorf("expected update to be != nil; got %v", update) - } - if update.Version != indexResult.Version+1 { - t.Errorf("expected version to be %d; got %d", indexResult.Version+1, update.Version) - } - - // Get document - getResult, err := client.Get(). - Index(testIndexName). - Type("tweet"). - Id("1"). 
- Do() - if err != nil { - t.Fatal(err) - } - if getResult.Index != testIndexName { - t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index) - } - if getResult.Type != "tweet" { - t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type) - } - if getResult.Id != "1" { - t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id) - } - if getResult.Source == nil { - t.Errorf("expected GetResult.Source to be != nil; got nil") - } - - // Decode the Source field - var tweetGot tweet - err = json.Unmarshal(*getResult.Source, &tweetGot) - if err != nil { - t.Fatal(err) - } - if tweetGot.Retweets != tweet1.Retweets+increment { - t.Errorf("expected Tweet.Retweets to be %d; got %d", tweet1.Retweets+increment, tweetGot.Retweets) - } -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/LICENSE b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/LICENSE deleted file mode 100644 index de9c88cb6..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/uritemplates.go b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/uritemplates.go deleted file mode 100644 index 8a84813fe..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/uritemplates.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2013 Joshua Tacoma. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uritemplates is a level 4 implementation of RFC 6570 (URI -// Template, http://tools.ietf.org/html/rfc6570). 
-// -// To use uritemplates, parse a template string and expand it with a value -// map: -// -// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") -// values := make(map[string]interface{}) -// values["user"] = "jtacoma" -// values["repo"] = "uritemplates" -// expanded, _ := template.ExpandString(values) -// fmt.Printf(expanded) -// -package uritemplates - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") - reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") - validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") - hex = []byte("0123456789ABCDEF") -) - -func pctEncode(src []byte) []byte { - dst := make([]byte, len(src)*3) - for i, b := range src { - buf := dst[i*3 : i*3+3] - buf[0] = 0x25 - buf[1] = hex[b/16] - buf[2] = hex[b%16] - } - return dst -} - -func escape(s string, allowReserved bool) (escaped string) { - if allowReserved { - escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) - } else { - escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) - } - return escaped -} - -// A UriTemplate is a parsed representation of a URI template. -type UriTemplate struct { - raw string - parts []templatePart -} - -// Parse parses a URI template string into a UriTemplate object. -func Parse(rawtemplate string) (template *UriTemplate, err error) { - template = new(UriTemplate) - template.raw = rawtemplate - split := strings.Split(rawtemplate, "{") - template.parts = make([]templatePart, len(split)*2-1) - for i, s := range split { - if i == 0 { - if strings.Contains(s, "}") { - err = errors.New("unexpected }") - break - } - template.parts[i].raw = s - } else { - subsplit := strings.Split(s, "}") - if len(subsplit) != 2 { - err = errors.New("malformed template") - break - } - expression := subsplit[0] - template.parts[i*2-1], err = parseExpression(expression) - if err != nil { - break - } - template.parts[i*2].raw = subsplit[1] - } - } - if err != nil { - template = nil - } - return template, err -} - -type templatePart struct { - raw string - terms []templateTerm - first string - sep string - named bool - ifemp string - allowReserved bool -} - -type templateTerm struct { - name string - explode bool - truncate int -} - -func parseExpression(expression string) (result templatePart, err error) { - switch expression[0] { - case '+': - result.sep = "," - result.allowReserved = true - expression = expression[1:] - case '.': - result.first = "." - result.sep = "." - expression = expression[1:] - case '/': - result.first = "/" - result.sep = "/" - expression = expression[1:] - case ';': - result.first = ";" - result.sep = ";" - result.named = true - expression = expression[1:] - case '?': - result.first = "?" 
- result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '&': - result.first = "&" - result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '#': - result.first = "#" - result.sep = "," - result.allowReserved = true - expression = expression[1:] - default: - result.sep = "," - } - rawterms := strings.Split(expression, ",") - result.terms = make([]templateTerm, len(rawterms)) - for i, raw := range rawterms { - result.terms[i], err = parseTerm(raw) - if err != nil { - break - } - } - return result, err -} - -func parseTerm(term string) (result templateTerm, err error) { - if strings.HasSuffix(term, "*") { - result.explode = true - term = term[:len(term)-1] - } - split := strings.Split(term, ":") - if len(split) == 1 { - result.name = term - } else if len(split) == 2 { - result.name = split[0] - var parsed int64 - parsed, err = strconv.ParseInt(split[1], 10, 0) - result.truncate = int(parsed) - } else { - err = errors.New("multiple colons in same term") - } - if !validname.MatchString(result.name) { - err = errors.New("not a valid name: " + result.name) - } - if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifers on same term") - } - return result, err -} - -// Expand expands a URI template with a set of values to produce a string. -func (self *UriTemplate) Expand(value interface{}) (string, error) { - values, ismap := value.(map[string]interface{}) - if !ismap { - if m, ismap := struct2map(value); !ismap { - return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") - } else { - return self.Expand(m) - } - } - var buf bytes.Buffer - for _, p := range self.parts { - err := p.expand(&buf, values) - if err != nil { - return "", err - } - } - return buf.String(), nil -} - -func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { - if len(self.raw) > 0 { - buf.WriteString(self.raw) - return nil - } - var zeroLen = buf.Len() - buf.WriteString(self.first) - var firstLen = buf.Len() - for _, term := range self.terms { - value, exists := values[term.name] - if !exists { - continue - } - if buf.Len() != firstLen { - buf.WriteString(self.sep) - } - switch v := value.(type) { - case string: - self.expandString(buf, term, v) - case []interface{}: - self.expandArray(buf, term, v) - case map[string]interface{}: - if term.truncate > 0 { - return errors.New("cannot truncate a map expansion") - } - self.expandMap(buf, term, v) - default: - if m, ismap := struct2map(value); ismap { - if term.truncate > 0 { - return errors.New("cannot truncate a map expansion") - } - self.expandMap(buf, term, m) - } else { - str := fmt.Sprintf("%v", value) - self.expandString(buf, term, str) - } - } - } - if buf.Len() == firstLen { - original := buf.Bytes()[:zeroLen] - buf.Reset() - buf.Write(original) - } - return nil -} - -func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { - if self.named { - buf.WriteString(name) - if empty { - buf.WriteString(self.ifemp) - } else { - buf.WriteString("=") - } - } -} - -func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - self.expandName(buf, t.name, len(s) == 0) - buf.WriteString(escape(s, self.allowReserved)) -} - -func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { - if len(a) == 0 { - return - } else if !t.explode { - 
self.expandName(buf, t.name, false) - } - for i, value := range a { - if t.explode && i > 0 { - buf.WriteString(self.sep) - } else if i > 0 { - buf.WriteString(",") - } - var s string - switch v := value.(type) { - case string: - s = v - default: - s = fmt.Sprintf("%v", v) - } - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - if self.named && t.explode { - self.expandName(buf, t.name, len(s) == 0) - } - buf.WriteString(escape(s, self.allowReserved)) - } -} - -func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { - if len(m) == 0 { - return - } - if !t.explode { - self.expandName(buf, t.name, len(m) == 0) - } - var firstLen = buf.Len() - for k, value := range m { - if firstLen != buf.Len() { - if t.explode { - buf.WriteString(self.sep) - } else { - buf.WriteString(",") - } - } - var s string - switch v := value.(type) { - case string: - s = v - default: - s = fmt.Sprintf("%v", v) - } - if t.explode { - buf.WriteString(escape(k, self.allowReserved)) - buf.WriteRune('=') - buf.WriteString(escape(s, self.allowReserved)) - } else { - buf.WriteString(escape(k, self.allowReserved)) - buf.WriteRune(',') - buf.WriteString(escape(s, self.allowReserved)) - } - } -} - -func struct2map(v interface{}) (map[string]interface{}, bool) { - value := reflect.ValueOf(v) - switch value.Type().Kind() { - case reflect.Ptr: - return struct2map(value.Elem().Interface()) - case reflect.Struct: - m := make(map[string]interface{}) - for i := 0; i < value.NumField(); i++ { - tag := value.Type().Field(i).Tag - var name string - if strings.Contains(string(tag), ":") { - name = tag.Get("uri") - } else { - name = strings.TrimSpace(string(tag)) - } - if len(name) == 0 { - name = value.Type().Field(i).Name - } - m[name] = value.Field(i).Interface() - } - return m, true - } - return nil, false -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils.go b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils.go deleted file mode 100644 index 399ef4623..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils.go +++ /dev/null @@ -1,13 +0,0 @@ -package uritemplates - -func Expand(path string, expansions map[string]string) (string, error) { - template, err := Parse(path) - if err != nil { - return "", err - } - values := make(map[string]interface{}) - for k, v := range expansions { - values[k] = v - } - return template.Expand(values) -} diff --git a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils_test.go b/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils_test.go deleted file mode 100644 index 633949b6f..000000000 --- a/services/templeton/vendor/src/github.com/olivere/elastic/uritemplates/utils_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package uritemplates - -import ( - "testing" -) - -type ExpandTest struct { - in string - expansions map[string]string - want string -} - -var expandTests = []ExpandTest{ - // #0: no expansions - { - "http://www.golang.org/", - map[string]string{}, - "http://www.golang.org/", - }, - // #1: one expansion, no escaping - { - "http://www.golang.org/{bucket}/delete", - map[string]string{ - "bucket": "red", - }, - "http://www.golang.org/red/delete", - }, - // #2: one expansion, with hex escapes - { - "http://www.golang.org/{bucket}/delete", - map[string]string{ - "bucket": "red/blue", - }, - "http://www.golang.org/red%2Fblue/delete", - }, - // #3: one expansion, with space - { - 
"http://www.golang.org/{bucket}/delete", - map[string]string{ - "bucket": "red or blue", - }, - "http://www.golang.org/red%20or%20blue/delete", - }, - // #4: expansion not found - { - "http://www.golang.org/{object}/delete", - map[string]string{ - "bucket": "red or blue", - }, - "http://www.golang.org//delete", - }, - // #5: multiple expansions - { - "http://www.golang.org/{one}/{two}/{three}/get", - map[string]string{ - "one": "ONE", - "two": "TWO", - "three": "THREE", - }, - "http://www.golang.org/ONE/TWO/THREE/get", - }, - // #6: utf-8 characters - { - "http://www.golang.org/{bucket}/get", - map[string]string{ - "bucket": "£100", - }, - "http://www.golang.org/%C2%A3100/get", - }, - // #7: punctuations - { - "http://www.golang.org/{bucket}/get", - map[string]string{ - "bucket": `/\@:,.*~`, - }, - "http://www.golang.org/%2F%5C%40%3A%2C.%2A~/get", - }, - // #8: mis-matched brackets - { - "http://www.golang.org/{bucket/get", - map[string]string{ - "bucket": "red", - }, - "", - }, - // #9: "+" prefix for suppressing escape - // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3 - { - "http://www.golang.org/{+topic}", - map[string]string{ - "topic": "/topics/myproject/mytopic", - }, - // The double slashes here look weird, but it's intentional - "http://www.golang.org//topics/myproject/mytopic", - }, -} - -func TestExpand(t *testing.T) { - for i, test := range expandTests { - got, _ := Expand(test.in, test.expansions) - if got != test.want { - t.Errorf("got %q expected %q in test %d", got, test.want, i) - } - } -} From 13d53b5acb2e6d3f4c38a48c639df3a2e7ba2dc2 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 22 Feb 2016 11:06:14 -0700 Subject: [PATCH 091/183] Set batch sizes to 1000 to match infosquito/monkey. --- .../templeton/src/templeton/elasticsearch/elasticsearch.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go index ce56bd3b7..231b0deaf 100644 --- a/services/templeton/src/templeton/elasticsearch/elasticsearch.go +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -70,13 +70,13 @@ func (b *BulkIndexer) Flush() error { // PurgeIndex walks an index querying a database, deleting those which should not exist func (e *Elasticer) PurgeIndex(d *database.Databaser) { - indexer := e.NewBulkIndexer(10) + indexer := e.NewBulkIndexer(1000) defer indexer.Flush() } // IndexEverything creates a bulk indexer and takes a database, and iterates to index its contents func (e *Elasticer) IndexEverything(d *database.Databaser) { - indexer := e.NewBulkIndexer(10) + indexer := e.NewBulkIndexer(1000) defer indexer.Flush() cursor, err := d.GetAllObjects() From 55e1d21cdaf1ccba563b863babe9f1736c1cce5c Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 22 Feb 2016 12:03:07 -0700 Subject: [PATCH 092/183] Add actual logic for purging the index of old/unused items. 
--- .../templeton/elasticsearch/elasticsearch.go | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go index 231b0deaf..5ba945f80 100644 --- a/services/templeton/src/templeton/elasticsearch/elasticsearch.go +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -72,6 +72,42 @@ func (b *BulkIndexer) Flush() error { func (e *Elasticer) PurgeIndex(d *database.Databaser) { indexer := e.NewBulkIndexer(1000) defer indexer.Flush() + + scanner, err := e.es.Scan(e.index).Type("metadata").Scroll("1m").Fields("_id").Do() + if err != nil { + logger.Fatal(err) + return + } + + for { + docs, err := scanner.Next() + if err == elastic.EOS { + logger.Print("Finished all rows for purge.") + break + } + if err != nil { + logger.Print(err) + break + } + + if docs.TotalHits() > 0 { + for _, hit := range docs.Hits.Hits { + avus, err := d.GetObjectAVUs(hit.Id) + if err != nil { + logger.Printf("Error processing %s: %s", hit.Id, err) + continue + } + if len(avus) == 0 { + logger.Printf("Deleting %s", hit.Id) + req := elastic.NewBulkDeleteRequest().Index(e.index).Type("metadata").Id(hit.Id) + err = indexer.Add(req) + if err != nil { + logger.Printf("Error enqueuing delete of %s: %s", hit.Id, err) + } + } + } + } + } } // IndexEverything creates a bulk indexer and takes a database, and iterates to index its contents From 91f5ddb4b719b4f9c52f135a8150c579236641a8 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 23 Feb 2016 11:31:06 -0700 Subject: [PATCH 093/183] Move reindex into templeton/elasticsearch package. Drop use of goroutines. --- .../templeton/src/templeton/elasticsearch/elasticsearch.go | 5 +++++ services/templeton/src/templeton/main.go | 3 +-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go index 5ba945f80..1598d3de5 100644 --- a/services/templeton/src/templeton/elasticsearch/elasticsearch.go +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -147,3 +147,8 @@ func (e *Elasticer) IndexEverything(d *database.Databaser) { } } } + +func (e *Elasticer) Reindex(d *database.Databaser) { + e.PurgeIndex(d) + e.IndexEverything(d) +} diff --git a/services/templeton/src/templeton/main.go b/services/templeton/src/templeton/main.go index 308361b3c..737d39e4d 100644 --- a/services/templeton/src/templeton/main.go +++ b/services/templeton/src/templeton/main.go @@ -127,8 +127,7 @@ func main() { if *mode == "full" { logger.Println("Full indexing mode selected.") - go es.PurgeIndex(d) - es.IndexEverything(d) + es.Reindex(d) return } From 340bc5650ed7a0130d566af660cf3df0cdd0904a Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 3 Mar 2016 12:59:51 -0700 Subject: [PATCH 094/183] Make info-typer amqp QOS value configurable. 
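
The prefetch window was previously hard-coded to 100 where the channel is
configured. It is now read from a validated integer property; both forms
appear in the diff below:

    ;; before: fixed prefetch window on the freshly opened channel
    (lb/qos chan 100)

    ;; after: the info-typer.amqp.qos property, parsed as an integer
    (lb/qos chan (cfg/amqp-qos))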
--- .../roles/util-cfg-service/templates/info-typer.properties.j2 | 1 + services/info-typer/src/info_typer/amqp.clj | 2 +- services/info-typer/src/info_typer/config.clj | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ansible/roles/util-cfg-service/templates/info-typer.properties.j2 b/ansible/roles/util-cfg-service/templates/info-typer.properties.j2 index ffe3118ee..495bbd961 100644 --- a/ansible/roles/util-cfg-service/templates/info-typer.properties.j2 +++ b/ansible/roles/util-cfg-service/templates/info-typer.properties.j2 @@ -25,3 +25,4 @@ info-typer.amqp.exchange = {{ amqp_irods_exchange }} info-typer.amqp.exchange.type = {{ amqp_irods_exchange_type }} info-typer.amqp.exchange.durable = {{ amqp_irods_exchange_durable }} info-typer.amqp.exchange.auto-delete = {{ amqp_irods_exchange_auto_delete }} +info-typer.amqp.qos = 100 diff --git a/services/info-typer/src/info_typer/amqp.clj b/services/info-typer/src/info_typer/amqp.clj index da6dc63fb..6f9edbd8e 100644 --- a/services/info-typer/src/info_typer/amqp.clj +++ b/services/info-typer/src/info_typer/amqp.clj @@ -86,7 +86,7 @@ (log/info "configuring AMQP connection") (let [chan (lch/open (get-connection (connection-map))) q (declare-queue chan (str "info-typer." (cfg/environment-name)))] - (lb/qos chan 100) + (lb/qos chan (cfg/amqp-qos)) (declare-exchange chan (cfg/amqp-exchange) (cfg/amqp-exchange-type) :durable (cfg/amqp-exchange-durable?) :auto-delete (cfg/amqp-exchange-auto-delete?)) (doseq [topic topics] (bind chan q (cfg/amqp-exchange) topic)) diff --git a/services/info-typer/src/info_typer/config.clj b/services/info-typer/src/info_typer/config.clj index 832d61af3..2c27d6545 100644 --- a/services/info-typer/src/info_typer/config.clj +++ b/services/info-typer/src/info_typer/config.clj @@ -150,6 +150,10 @@ [props config-valid configs] "info-typer.amqp.exchange.auto-delete") +(cc/defprop-int amqp-qos + "The number of messages to allow to be delivered to this client at once without acknowledgement." + [props config-valid configs] + "info-typer.amqp.qos") (defn- exception-filters [] From 0386fb58ab2115d9394073aa6a22b464eb88de39 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 3 Mar 2016 13:03:16 -0700 Subject: [PATCH 095/183] Make dewey amqp QOS value configurable. 
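
Unlike info-typer, dewey reads its settings from a plain properties map,
so the value is parsed where the consumer is wired up and threaded through
the consume setup down to the channel (condensed from the diff below):

    ;; core.clj: parse the property when building the consumer
    (Integer. (get props "dewey.amqp.qos"))

    ;; amq.clj: apply the prefetch limit once the channel is open
    (lb/qos channel qos)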
--- .../roles/util-cfg-service/templates/dewey.properties.j2 | 1 + services/dewey/dev-resources/local.properties | 1 + services/dewey/src/dewey/amq.clj | 8 +++++--- services/dewey/src/dewey/core.clj | 1 + 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/ansible/roles/util-cfg-service/templates/dewey.properties.j2 b/ansible/roles/util-cfg-service/templates/dewey.properties.j2 index 01177a606..bea2a487d 100644 --- a/ansible/roles/util-cfg-service/templates/dewey.properties.j2 +++ b/ansible/roles/util-cfg-service/templates/dewey.properties.j2 @@ -7,6 +7,7 @@ dewey.amqp.password = {{ amqp_password }} dewey.amqp.exchange.name = {{ amqp_irods_exchange }} dewey.amqp.exchange.durable = {{ amqp_irods_exchange_durable }} dewey.amqp.exchange.auto-delete = {{ amqp_irods_exchange_auto_delete }} +dewey.amqp.qos = 100 dewey.es.host = {{ elasticsearch.host }} dewey.es.port = {{ elasticsearch.port }} diff --git a/services/dewey/dev-resources/local.properties b/services/dewey/dev-resources/local.properties index db2cf7f90..e77788bb5 100644 --- a/services/dewey/dev-resources/local.properties +++ b/services/dewey/dev-resources/local.properties @@ -16,5 +16,6 @@ dewey.irods.user = rods dewey.irods.password = rods dewey.irods.default-resource = "" dewey.irods.home = /iplant/home/rods +dewey.amqp.qos = 100 dewey.status.listen-port = 8080 diff --git a/services/dewey/src/dewey/amq.clj b/services/dewey/src/dewey/amq.clj index 83918b70a..aa8ea9798 100644 --- a/services/dewey/src/dewey/amq.clj +++ b/services/dewey/src/dewey/amq.clj @@ -25,7 +25,7 @@ (defn- consume - [connection queue exchange-name exchange-durable exchange-auto-delete topics delivery-fn] + [connection queue exchange-name exchange-durable exchange-auto-delete qos topics delivery-fn] (let [channel (lch/open connection) consumer (lc/create-default channel :handle-consume-ok-fn (fn [_] (log/info "Registered with AMQP broker")) @@ -39,7 +39,7 @@ exchange-auto-delete topics delivery-fn)))] - (lb/qos channel 100) + (lb/qos channel qos) (le/topic channel exchange-name :durable exchange-durable :auto-delete exchange-auto-delete) (lq/declare channel queue :durable true) (doseq [topic topics] (lq/bind channel queue exchange-name :routing-key topic)) @@ -60,13 +60,14 @@ exchange-durable - a flag indicating whether or not the exchange preserves messages exchange-auto-delete - a flag indicating whether or not the exchange is deleted when all queues have been dettached + qos - a number of messages to allow to be sent to this client without acknowledgement consumer-fn - the function that will receive the JSON document topics - Optionally, a list of topics to listen for Throws: It will throw an exception if it fails to connect to the AMQP broker, setup the exchange, or setup the queue." - [host port user password queue-name exchange-name exchange-durable exchange-auto-delete consumer-fn & topics] + [host port user password queue-name exchange-name exchange-durable exchange-auto-delete qos consumer-fn & topics] (consume (rmq/connect {:host host :port port :username user @@ -76,5 +77,6 @@ exchange-name exchange-durable exchange-auto-delete + qos (if (empty? topics) "#" topics) (mk-handler consumer-fn))) diff --git a/services/dewey/src/dewey/core.clj b/services/dewey/src/dewey/core.clj index 69941ad72..8d637dd71 100644 --- a/services/dewey/src/dewey/core.clj +++ b/services/dewey/src/dewey/core.clj @@ -57,6 +57,7 @@ (get props "dewey.amqp.exchange.name") (Boolean. (get props "dewey.amqp.exchange.durable")) (Boolean. 
(get props "dewey.amqp.exchange.auto-delete")) + (Integer. (get props "dewey.amqp.qos")) (partial curation/consume-msg irods-cfg es) "data-object.#" "collection.#") From 93f904b58566d028a98dc4be378a4af664a3627e Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Thu, 3 Mar 2016 13:33:13 -0700 Subject: [PATCH 096/183] CORE-7445 add logging for debugging. --- .../views/dialogs/SimpleFileUploadDialog.java | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java index 91ed3da3a..022818ad0 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java @@ -55,6 +55,8 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; /** * @author jstroot @@ -115,6 +117,8 @@ interface SimpleFileUploadPanelUiBinder extends UiBinder"); + public SimpleFileUploadDialog(final HasPath uploadDest, final DiskResourceServiceFacade drService, final EventBus eventBus, @@ -252,6 +256,7 @@ void onSubmitComplete(SubmitCompleteEvent event) { IPCFileUploadField field = fufList.get(formList.indexOf(event.getSource())); String results2 = event.getResults(); GWT.log("upload result->" + results2); + LOG.log(Level.SEVERE, "\nUpload result -->" + results2 + "<----\n"); if (Strings.isNullOrEmpty(results2)) { IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList( @@ -260,17 +265,22 @@ void onSubmitComplete(SubmitCompleteEvent event) { String results = Format.stripTags(results2); Splittable split = StringQuoter.split(results); + LOG.log(Level.SEVERE, "\nUpload split -->" + results + "<---\n"); + if (split == null) { + LOG.log(Level.SEVERE, "\n--->Upload split null-->\n"); IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList( field.getValue())))); } else { if (split.isUndefined("file") || (split.get("file") == null)) { + LOG.log(Level.SEVERE, "\n--->Upload split file empty-->\n"); field.markInvalid(appearance.fileUploadsFailed(Lists.newArrayList(field.getValue()))); IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed( Lists.newArrayList(field.getValue())))); } else { + LOG.log(Level.SEVERE, "\n--->Upload split not empty -->\n"); eventBus.fireEvent(new FileUploadedEvent(uploadDest, field.getValue(), results)); } } @@ -369,8 +379,10 @@ public void onSubmit(SubmitEvent event) { }); try { form.submit(); + LOG.log(Level.SEVERE, "\nUpload submitted!\n"); } catch(Exception e ) { - GWT.log("expcetion on submit" + e.getMessage()); + GWT.log("\nxpcetion on submit\n" + e.getMessage()); + LOG.log(Level.SEVERE, "\nUpload exception!\n"); IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed( Lists.newArrayList(field.getValue())))); From 909b75b52b80700e355966bf1268f5f062e343da Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 23 Feb 2016 11:10:22 -0700 Subject: [PATCH 097/183] Add messaging package copied-with-slight-modification from jobservices. 
--- services/templeton/src/messaging/amqp.go | 203 + services/templeton/vendor/manifest | 6 + .../src/github.com/streadway/amqp/LICENSE | 23 + .../src/github.com/streadway/amqp/README.md | 81 + .../streadway/amqp/_examples/pubsub/pubsub.go | 230 ++ .../_examples/simple-consumer/consumer.go | 169 + .../_examples/simple-producer/producer.go | 112 + .../github.com/streadway/amqp/allocator.go | 106 + .../streadway/amqp/allocator_test.go | 90 + .../src/github.com/streadway/amqp/auth.go | 44 + .../src/github.com/streadway/amqp/certs.sh | 159 + .../src/github.com/streadway/amqp/channel.go | 1557 ++++++++ .../github.com/streadway/amqp/client_test.go | 603 +++ .../src/github.com/streadway/amqp/confirms.go | 93 + .../streadway/amqp/confirms_test.go | 119 + .../github.com/streadway/amqp/connection.go | 769 ++++ .../github.com/streadway/amqp/consumers.go | 118 + .../src/github.com/streadway/amqp/delivery.go | 173 + .../streadway/amqp/delivery_test.go | 33 + .../src/github.com/streadway/amqp/doc.go | 108 + .../streadway/amqp/examples_test.go | 393 ++ .../src/github.com/streadway/amqp/fuzz.go | 16 + .../src/github.com/streadway/amqp/gen.sh | 2 + .../streadway/amqp/integration_test.go | 1796 +++++++++ .../src/github.com/streadway/amqp/read.go | 447 +++ .../github.com/streadway/amqp/read_test.go | 22 + .../streadway/amqp/reconnect_test.go | 113 + .../src/github.com/streadway/amqp/return.go | 64 + .../github.com/streadway/amqp/shared_test.go | 71 + .../amqp/spec/amqp0-9-1.stripped.extended.xml | 537 +++ .../src/github.com/streadway/amqp/spec/gen.go | 536 +++ .../src/github.com/streadway/amqp/spec091.go | 3306 +++++++++++++++++ .../src/github.com/streadway/amqp/tls_test.go | 218 ++ .../src/github.com/streadway/amqp/types.go | 390 ++ .../src/github.com/streadway/amqp/uri.go | 170 + .../src/github.com/streadway/amqp/uri_test.go | 328 ++ .../src/github.com/streadway/amqp/write.go | 411 ++ 37 files changed, 13616 insertions(+) create mode 100644 services/templeton/src/messaging/amqp.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/LICENSE create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/README.md create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/_examples/pubsub/pubsub.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/_examples/simple-consumer/consumer.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/_examples/simple-producer/producer.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/allocator.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/allocator_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/auth.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/certs.sh create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/channel.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/client_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/confirms.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/confirms_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/connection.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/consumers.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/delivery.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/delivery_test.go 
create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/doc.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/examples_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/fuzz.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/gen.sh create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/integration_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/read.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/read_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/reconnect_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/return.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/shared_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/spec/gen.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/spec091.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/tls_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/types.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/uri.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/uri_test.go create mode 100644 services/templeton/vendor/src/github.com/streadway/amqp/write.go diff --git a/services/templeton/src/messaging/amqp.go b/services/templeton/src/messaging/amqp.go new file mode 100644 index 000000000..504a1898e --- /dev/null +++ b/services/templeton/src/messaging/amqp.go @@ -0,0 +1,203 @@ +package messaging + +import ( + "logcabin" + "math/rand" + "os" + "time" + + "github.com/streadway/amqp" +) + +var logger = logcabin.New() + +// MessageHandler defines a type for amqp.Delivery handlers. +type MessageHandler func(amqp.Delivery) + +type aggregationMessage struct { + handler MessageHandler + delivery *amqp.Delivery +} + +type consumer struct { + exchange string + queue string + key string + handler MessageHandler +} + +type consumeradder struct { + consumer consumer + latch chan int +} + +type publisher struct { + exchange string + channel *amqp.Channel +} + +// Client encapsulates the information needed to interact via AMQP. +type Client struct { + uri string + connection *amqp.Connection + aggregationChan chan aggregationMessage + errors chan *amqp.Error + consumers []*consumer + consumersChan chan consumeradder + publisher *publisher + Reconnect bool +} + +// NewClient returns a new *Client. It will block until the connection succeeds. 
+func NewClient(uri string, reconnect bool) (*Client, error) { + c := &Client{} + randomizer := rand.New(rand.NewSource(time.Now().UnixNano())) + c.uri = uri + c.Reconnect = reconnect + logger.Println("Attempting AMQP connection...") + var connection *amqp.Connection + var err error + if c.Reconnect { + for { + connection, err = amqp.Dial(c.uri) + if err != nil { + logger.Print(err) + waitFor := randomizer.Intn(10) + logger.Printf("Re-attempting connection in %d seconds", waitFor) + time.Sleep(time.Duration(waitFor) * time.Second) + } else { + logger.Println("Successfully connected to the AMQP broker") + break + } + } + } else { + connection, err = amqp.Dial(c.uri) + if err != nil { + return nil, err + } + logger.Println("Successfully connected to the AMQP broker") + } + c.connection = connection + c.consumersChan = make(chan consumeradder) + c.aggregationChan = make(chan aggregationMessage) + c.errors = c.connection.NotifyClose(make(chan *amqp.Error)) + return c, nil +} + +// Listen will wait for messages and pass them off to handlers, which run in +// their own goroutine. +func (c *Client) Listen() { + var consumers []*consumer + // init := func() { + // for _, cs := range c.consumers { + // c.initconsumer(cs) + // } + // } + // init() + // for _, cs := range c.consumers { + // consumers = append(consumers, cs) + // } + for { + select { + case cs := <-c.consumersChan: + logger.Println("A new consumer is being added") + c.initconsumer(&cs.consumer) + consumers = append(consumers, &cs.consumer) + logger.Println("Done adding a new consumer") + cs.latch <- 1 + case err := <-c.errors: + logger.Printf("An error in the connection to the AMQP broker occurred:\n%s", err) + if c.Reconnect { + c, _ = NewClient(c.uri, c.Reconnect) + c.consumers = consumers + for _, cs := range c.consumers { + c.initconsumer(cs) + } + // init() + } else { + os.Exit(-1) + } + case msg := <-c.aggregationChan: + go func() { + msg.handler(*msg.delivery) + }() + } + } +} + +// Close closes the connection to the AMQP broker. +func (c *Client) Close() { + c.connection.Close() +} + +// AddConsumer adds a consumer to the list of consumers that need to be created +// each time the client is set up. Note that this just adds the consumers to a +// list, it doesn't actually start handling messages yet. You need to call +// Listen() for that. 
+func (c *Client) AddConsumer(exchange, queue, key string, handler MessageHandler) { + cs := consumer{ + exchange: exchange, + queue: queue, + key: key, + handler: handler, + } + adder := consumeradder{ + consumer: cs, + latch: make(chan int), + } + c.consumersChan <- adder + <-adder.latch +} + +func (c *Client) initconsumer(cs *consumer) error { + channel, err := c.connection.Channel() + if err != nil { + return err + } + err = channel.ExchangeDeclare( + cs.exchange, //name + "topic", //kind + true, //durable + false, //auto-delete + false, //internal + false, //no-wait + nil, //args + ) + _, err = channel.QueueDeclare( + cs.queue, + true, //durable + false, //auto-delete + false, //internal + false, //no-wait + nil, //args + ) + err = channel.QueueBind( + cs.queue, + cs.key, + cs.exchange, + false, //no-wait + nil, //args + ) + + d, err := channel.Consume( + cs.queue, + "", //consumer tag - auto-assigned in this case + false, //auto-ack + false, //exclusive + false, //no-local + false, //no-wait + nil, //args + ) + if err != nil { + return err + } + go func() { + for msg := range d { + c.aggregationChan <- aggregationMessage{ + handler: cs.handler, + delivery: &msg, + } + } + }() + return err +} diff --git a/services/templeton/vendor/manifest b/services/templeton/vendor/manifest index 13beb5830..b04f79d82 100644 --- a/services/templeton/vendor/manifest +++ b/services/templeton/vendor/manifest @@ -13,6 +13,12 @@ "revision": "e3edea7d68b76222b5118cc2e1cf3825e30abb80", "branch": "master" }, + { + "importpath": "github.com/streadway/amqp", + "repository": "https://github.com/streadway/amqp", + "revision": "b4f3ceab0337f013208d31348b578d83c0064744", + "branch": "master" + }, { "importpath": "gopkg.in/olivere/elastic.v3", "repository": "https://gopkg.in/olivere/elastic.v3", diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/LICENSE b/services/templeton/vendor/src/github.com/streadway/amqp/LICENSE new file mode 100644 index 000000000..243c0ce7c --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/README.md b/services/templeton/vendor/src/github.com/streadway/amqp/README.md new file mode 100644 index 000000000..7869af81e --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/README.md @@ -0,0 +1,81 @@ +# AMQP + +AMQP 0.9.1 client with RabbitMQ extensions in Go. + +# Status + +*Beta* + +[![Build Status](https://secure.travis-ci.org/streadway/amqp.png)](http://travis-ci.org/streadway/amqp) + +API changes unlikely and will be discussed on [Github +issues](https://github.com/streadway/amqp/issues) along with any bugs or +enhancements. + +# Goals + +Provide a functional interface that closely represents the AMQP 0.9.1 model +targeted to RabbitMQ as a server. This includes the minimum necessary to +interact the semantics of the protocol. + +# Non-goals + +Things not intended to be supported. + + * Auto reconnect and re-synchronization of client and server topologies. + * Reconnection would require understanding the error paths when the + topology cannot be declared on reconnect. This would require a new set + of types and code paths that are best suited at the call-site of this + package. AMQP has a dynamic topology that needs all peers to agree. If + this doesn't happen, the behavior is undefined. Instead of producing a + possible interface with undefined behavior, this package is designed to + be simple for the caller to implement the necessary connection-time + topology declaration so that reconnection is trivial and encapsulated in + the caller's application code. + * AMQP Protocol negotiation for forward or backward compatibility. + * 0.9.1 is stable and widely deployed. Versions 0.10 and 1.0 are divergent + specifications that change the semantics and wire format of the protocol. + We will accept patches for other protocol support but have no plans for + implementation ourselves. + * Anything other than PLAIN and EXTERNAL authentication mechanisms. + * Keeping the mechanisms interface modular makes it possible to extend + outside of this package. If other mechanisms prove to be popular, then + we would accept patches to include them in this pacakge. + +# Usage + +See the 'examples' subdirectory for simple producers and consumers executables. +If you have a use-case in mind which isn't well-represented by the examples, +please file an issue. + +# Documentation + +Use [Godoc documentation](http://godoc.org/github.com/streadway/amqp) for +reference and usage. + +[RabbitMQ tutorials in +Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) are also +available. + +# Contributing + +Pull requests are very much welcomed. Create your pull request on a non-master +branch, make sure a test or example is included that covers your change and +your commits represent coherent changes that include a reason for the change. + +To run the integration tests, make sure you have RabbitMQ running on any host, +export the environment variable `AMQP_URL=amqp://host/` and run `go test -tags +integration`. TravisCI will also run the integration tests. + +Thanks to the [community of contributors](https://github.com/streadway/amqp/graphs/contributors). 
+ +# External packages + + * Google App Engine Dialer support: [https://github.com/soundtrackyourbrand/gaeamqp](https://github.com/soundtrackyourbrand/gaeamqp) + * RabbitMQ examples in Go: [https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) + +# License + +BSD 2 clause - see LICENSE for more details. + + diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/_examples/pubsub/pubsub.go b/services/templeton/vendor/src/github.com/streadway/amqp/_examples/pubsub/pubsub.go new file mode 100644 index 000000000..29b4a53a3 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/_examples/pubsub/pubsub.go @@ -0,0 +1,230 @@ +// Command pubsub is an example of a fanout exchange with dynamic reliable +// membership, reading from stdin, writing to stdout. +// +// This example shows how to implement reconnect logic independent from a +// publish/subscribe loop with bridges to application types. + +package main + +import ( + "bufio" + "crypto/sha1" + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/streadway/amqp" + "golang.org/x/net/context" +) + +var url = flag.String("url", "amqp:///", "AMQP url for both the publisher and subscriber") + +// exchange binds the publishers to the subscribers +const exchange = "pubsub" + +// message is the application type for a message. This can contain identity, +// or a reference to the recevier chan for further demuxing. +type message []byte + +// session composes an amqp.Connection with an amqp.Channel +type session struct { + *amqp.Connection + *amqp.Channel +} + +// Close tears the connection down, taking the channel with it. +func (s session) Close() error { + if s.Connection == nil { + return nil + } + return s.Connection.Close() +} + +// redial continually connects to the URL, exiting the program when no longer possible +func redial(ctx context.Context, url string) chan chan session { + sessions := make(chan chan session) + + go func() { + sess := make(chan session) + defer close(sessions) + + for { + select { + case sessions <- sess: + case <-ctx.Done(): + log.Println("shutting down session factory") + return + } + + conn, err := amqp.Dial(url) + if err != nil { + log.Fatalf("cannot (re)dial: %v: %q", err, url) + } + + ch, err := conn.Channel() + if err != nil { + log.Fatalf("cannot create channel: %v", err) + } + + if err := ch.ExchangeDeclare(exchange, "fanout", false, true, false, false, nil); err != nil { + log.Fatalf("cannot declare fanout exchange: %v", err) + } + + select { + case sess <- session{conn, ch}: + case <-ctx.Done(): + log.Println("shutting down new session") + return + } + } + }() + + return sessions +} + +// publish publishes messages to a reconnecting session to a fanout exchange. +// It receives from the application specific source of messages. 
+func publish(sessions chan chan session, messages <-chan message) { + var ( + running bool + reading = messages + pending = make(chan message, 1) + confirm = make(chan amqp.Confirmation, 1) + ) + + for session := range sessions { + pub := <-session + + // publisher confirms for this channel/connection + if err := pub.Confirm(false); err != nil { + log.Printf("publisher confirms not supported") + close(confirm) // confirms not supported, simulate by always nacking + } else { + pub.NotifyPublish(confirm) + } + + log.Printf("publishing...") + + for { + var body message + select { + case confirmed := <-confirm: + if !confirmed.Ack { + log.Printf("nack message %d, body: %q", confirmed.DeliveryTag, string(body)) + } + reading = messages + + case body = <-pending: + routingKey := "ignored for fanout exchanges, application dependent for other exchanges" + err := pub.Publish(exchange, routingKey, false, false, amqp.Publishing{ + Body: body, + }) + // Retry failed delivery on the next session + if err != nil { + pending <- body + pub.Close() + break + } + + case body, running = <-reading: + // all messages consumed + if !running { + return + } + // work on pending delivery until ack'd + pending <- body + reading = nil + } + } + } +} + +// identity returns the same host/process unique string for the lifetime of +// this process so that subscriber reconnections reuse the same queue name. +func identity() string { + hostname, err := os.Hostname() + h := sha1.New() + fmt.Fprint(h, hostname) + fmt.Fprint(h, err) + fmt.Fprint(h, os.Getpid()) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +// subscribe consumes deliveries from an exclusive queue from a fanout exchange and sends to the application specific messages chan. +func subscribe(sessions chan chan session, messages chan<- message) { + queue := identity() + + for session := range sessions { + sub := <-session + + if _, err := sub.QueueDeclare(queue, false, true, true, false, nil); err != nil { + log.Printf("cannot consume from exclusive queue: %q, %v", queue, err) + return + } + + routingKey := "application specific routing key for fancy toplogies" + if err := sub.QueueBind(queue, routingKey, exchange, false, nil); err != nil { + log.Printf("cannot consume without a binding to exchange: %q, %v", exchange, err) + return + } + + deliveries, err := sub.Consume(queue, "", false, true, false, false, nil) + if err != nil { + log.Printf("cannot consume from: %q, %v", queue, err) + return + } + + log.Printf("subscribed...") + + for msg := range deliveries { + messages <- message(msg.Body) + sub.Ack(msg.DeliveryTag, false) + } + } +} + +// read is this application's translation to the message format, scanning from +// stdin. +func read(r io.Reader) <-chan message { + lines := make(chan message) + go func() { + defer close(lines) + scan := bufio.NewScanner(r) + for scan.Scan() { + lines <- message(scan.Bytes()) + } + }() + return lines +} + +// write is this application's subscriber of application messages, printing to +// stdout. 
+func write(w io.Writer) chan<- message { + lines := make(chan message) + go func() { + for line := range lines { + fmt.Fprintln(w, string(line)) + } + }() + return lines +} + +func main() { + flag.Parse() + + ctx, done := context.WithCancel(context.Background()) + + go func() { + publish(redial(ctx, *url), read(os.Stdin)) + done() + }() + + go func() { + subscribe(redial(ctx, *url), write(os.Stdout)) + done() + }() + + <-ctx.Done() +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/_examples/simple-consumer/consumer.go b/services/templeton/vendor/src/github.com/streadway/amqp/_examples/simple-consumer/consumer.go new file mode 100644 index 000000000..b1c2180db --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/_examples/simple-consumer/consumer.go @@ -0,0 +1,169 @@ +// This example declares a durable Exchange, an ephemeral (auto-delete) Queue, +// binds the Queue to the Exchange with a binding key, and consumes every +// message published to that Exchange with that routing key. +// +package main + +import ( + "flag" + "fmt" + "github.com/streadway/amqp" + "log" + "time" +) + +var ( + uri = flag.String("uri", "amqp://guest:guest@localhost:5672/", "AMQP URI") + exchange = flag.String("exchange", "test-exchange", "Durable, non-auto-deleted AMQP exchange name") + exchangeType = flag.String("exchange-type", "direct", "Exchange type - direct|fanout|topic|x-custom") + queue = flag.String("queue", "test-queue", "Ephemeral AMQP queue name") + bindingKey = flag.String("key", "test-key", "AMQP binding key") + consumerTag = flag.String("consumer-tag", "simple-consumer", "AMQP consumer tag (should not be blank)") + lifetime = flag.Duration("lifetime", 5*time.Second, "lifetime of process before shutdown (0s=infinite)") +) + +func init() { + flag.Parse() +} + +func main() { + c, err := NewConsumer(*uri, *exchange, *exchangeType, *queue, *bindingKey, *consumerTag) + if err != nil { + log.Fatalf("%s", err) + } + + if *lifetime > 0 { + log.Printf("running for %s", *lifetime) + time.Sleep(*lifetime) + } else { + log.Printf("running forever") + select {} + } + + log.Printf("shutting down") + + if err := c.Shutdown(); err != nil { + log.Fatalf("error during shutdown: %s", err) + } +} + +type Consumer struct { + conn *amqp.Connection + channel *amqp.Channel + tag string + done chan error +} + +func NewConsumer(amqpURI, exchange, exchangeType, queueName, key, ctag string) (*Consumer, error) { + c := &Consumer{ + conn: nil, + channel: nil, + tag: ctag, + done: make(chan error), + } + + var err error + + log.Printf("dialing %q", amqpURI) + c.conn, err = amqp.Dial(amqpURI) + if err != nil { + return nil, fmt.Errorf("Dial: %s", err) + } + + go func() { + fmt.Printf("closing: %s", <-c.conn.NotifyClose(make(chan *amqp.Error))) + }() + + log.Printf("got Connection, getting Channel") + c.channel, err = c.conn.Channel() + if err != nil { + return nil, fmt.Errorf("Channel: %s", err) + } + + log.Printf("got Channel, declaring Exchange (%q)", exchange) + if err = c.channel.ExchangeDeclare( + exchange, // name of the exchange + exchangeType, // type + true, // durable + false, // delete when complete + false, // internal + false, // noWait + nil, // arguments + ); err != nil { + return nil, fmt.Errorf("Exchange Declare: %s", err) + } + + log.Printf("declared Exchange, declaring Queue %q", queueName) + queue, err := c.channel.QueueDeclare( + queueName, // name of the queue + true, // durable + false, // delete when usused + false, // exclusive + false, // noWait + nil, // 
arguments + ) + if err != nil { + return nil, fmt.Errorf("Queue Declare: %s", err) + } + + log.Printf("declared Queue (%q %d messages, %d consumers), binding to Exchange (key %q)", + queue.Name, queue.Messages, queue.Consumers, key) + + if err = c.channel.QueueBind( + queue.Name, // name of the queue + key, // bindingKey + exchange, // sourceExchange + false, // noWait + nil, // arguments + ); err != nil { + return nil, fmt.Errorf("Queue Bind: %s", err) + } + + log.Printf("Queue bound to Exchange, starting Consume (consumer tag %q)", c.tag) + deliveries, err := c.channel.Consume( + queue.Name, // name + c.tag, // consumerTag, + false, // noAck + false, // exclusive + false, // noLocal + false, // noWait + nil, // arguments + ) + if err != nil { + return nil, fmt.Errorf("Queue Consume: %s", err) + } + + go handle(deliveries, c.done) + + return c, nil +} + +func (c *Consumer) Shutdown() error { + // will close() the deliveries channel + if err := c.channel.Cancel(c.tag, true); err != nil { + return fmt.Errorf("Consumer cancel failed: %s", err) + } + + if err := c.conn.Close(); err != nil { + return fmt.Errorf("AMQP connection close error: %s", err) + } + + defer log.Printf("AMQP shutdown OK") + + // wait for handle() to exit + return <-c.done +} + +func handle(deliveries <-chan amqp.Delivery, done chan error) { + for d := range deliveries { + log.Printf( + "got %dB delivery: [%v] %q", + len(d.Body), + d.DeliveryTag, + d.Body, + ) + d.Ack(false) + } + log.Printf("handle: deliveries channel closed") + done <- nil +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/_examples/simple-producer/producer.go b/services/templeton/vendor/src/github.com/streadway/amqp/_examples/simple-producer/producer.go new file mode 100644 index 000000000..199868398 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/_examples/simple-producer/producer.go @@ -0,0 +1,112 @@ +// This example declares a durable Exchange, and publishes a single message to +// that Exchange with a given routing key. +// +package main + +import ( + "flag" + "fmt" + "log" + + "github.com/streadway/amqp" +) + +var ( + uri = flag.String("uri", "amqp://guest:guest@localhost:5672/", "AMQP URI") + exchangeName = flag.String("exchange", "test-exchange", "Durable AMQP exchange name") + exchangeType = flag.String("exchange-type", "direct", "Exchange type - direct|fanout|topic|x-custom") + routingKey = flag.String("key", "test-key", "AMQP routing key") + body = flag.String("body", "foobar", "Body of message") + reliable = flag.Bool("reliable", true, "Wait for the publisher confirmation before exiting") +) + +func init() { + flag.Parse() +} + +func main() { + if err := publish(*uri, *exchangeName, *exchangeType, *routingKey, *body, *reliable); err != nil { + log.Fatalf("%s", err) + } + log.Printf("published %dB OK", len(*body)) +} + +func publish(amqpURI, exchange, exchangeType, routingKey, body string, reliable bool) error { + + // This function dials, connects, declares, publishes, and tears down, + // all in one go. In a real service, you probably want to maintain a + // long-lived connection as state, and publish against that. 
+ + log.Printf("dialing %q", amqpURI) + connection, err := amqp.Dial(amqpURI) + if err != nil { + return fmt.Errorf("Dial: %s", err) + } + defer connection.Close() + + log.Printf("got Connection, getting Channel") + channel, err := connection.Channel() + if err != nil { + return fmt.Errorf("Channel: %s", err) + } + + log.Printf("got Channel, declaring %q Exchange (%q)", exchangeType, exchange) + if err := channel.ExchangeDeclare( + exchange, // name + exchangeType, // type + true, // durable + false, // auto-deleted + false, // internal + false, // noWait + nil, // arguments + ); err != nil { + return fmt.Errorf("Exchange Declare: %s", err) + } + + // Reliable publisher confirms require confirm.select support from the + // connection. + if reliable { + log.Printf("enabling publishing confirms.") + if err := channel.Confirm(false); err != nil { + return fmt.Errorf("Channel could not be put into confirm mode: %s", err) + } + + confirms := channel.NotifyPublish(make(chan amqp.Confirmation, 1)) + + defer confirmOne(confirms) + } + + log.Printf("declared Exchange, publishing %dB body (%q)", len(body), body) + if err = channel.Publish( + exchange, // publish to an exchange + routingKey, // routing to 0 or more queues + false, // mandatory + false, // immediate + amqp.Publishing{ + Headers: amqp.Table{}, + ContentType: "text/plain", + ContentEncoding: "", + Body: []byte(body), + DeliveryMode: amqp.Transient, // 1=non-persistent, 2=persistent + Priority: 0, // 0-9 + // a bunch of application/implementation-specific fields + }, + ); err != nil { + return fmt.Errorf("Exchange Publish: %s", err) + } + + return nil +} + +// One would typically keep a channel of publishings, a sequence number, and a +// set of unacknowledged sequence numbers and loop until the publishing channel +// is closed. +func confirmOne(confirms <-chan amqp.Confirmation) { + log.Printf("waiting for confirmation of one publishing") + + if confirmed := <-confirms; confirmed.Ack { + log.Printf("confirmed delivery with delivery tag: %d", confirmed.DeliveryTag) + } else { + log.Printf("failed delivery of delivery tag: %d", confirmed.DeliveryTag) + } +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/allocator.go b/services/templeton/vendor/src/github.com/streadway/amqp/allocator.go new file mode 100644 index 000000000..928418826 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/allocator.go @@ -0,0 +1,106 @@ +package amqp + +import ( + "bytes" + "fmt" + "math/big" +) + +const ( + free = 0 + allocated = 1 +) + +// allocator maintains a bitset of allocated numbers. +type allocator struct { + pool *big.Int + last int + low int + high int +} + +// NewAllocator reserves and frees integers out of a range between low and +// high. 
+// +// O(N) worst case space used, where N is maximum allocated, divided by +// sizeof(big.Word) +func newAllocator(low, high int) *allocator { + return &allocator{ + pool: big.NewInt(0), + last: low, + low: low, + high: high, + } +} + +// String returns a string describing the contents of the allocator like +// "allocator[low..high] reserved..until" +// +// O(N) where N is high-low +func (a allocator) String() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high) + + for low := a.low; low <= a.high; low++ { + high := low + for a.reserved(high) && high <= a.high { + high++ + } + + if high > low+1 { + fmt.Fprintf(b, " %d..%d", low, high-1) + } else if high > low { + fmt.Fprintf(b, " %d", high-1) + } + + low = high + } + return b.String() +} + +// Next reserves and returns the next available number out of the range between +// low and high. If no number is available, false is returned. +// +// O(N) worst case runtime where N is allocated, but usually O(1) due to a +// rolling index into the oldest allocation. +func (a *allocator) next() (int, bool) { + wrapped := a.last + + // Find trailing bit + for ; a.last <= a.high; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + // Find preceeding free'd pool + a.last = a.low + + for ; a.last < wrapped; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + return 0, false +} + +// reserve claims the bit if it is not already claimed, returning true if +// succesfully claimed. +func (a *allocator) reserve(n int) bool { + if a.reserved(n) { + return false + } + a.pool.SetBit(a.pool, n-a.low, allocated) + return true +} + +// reserved returns true if the integer has been allocated +func (a *allocator) reserved(n int) bool { + return a.pool.Bit(n-a.low) == allocated +} + +// release frees the use of the number for another allocation +func (a *allocator) release(n int) { + a.pool.SetBit(a.pool, n-a.low, free) +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/allocator_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/allocator_test.go new file mode 100644 index 000000000..2d6fd5dba --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/allocator_test.go @@ -0,0 +1,90 @@ +package amqp + +import ( + "math/rand" + "testing" +) + +func TestAllocatorFirstShouldBeTheLow(t *testing.T) { + n, ok := newAllocator(1, 2).next() + if !ok { + t.Fatalf("expected to allocate between 1 and 2") + } + + if want, got := 1, n; want != got { + t.Fatalf("expected to first allocation to be 1") + } +} + +func TestAllocatorShouldBeBoundByHigh(t *testing.T) { + a := newAllocator(1, 2) + + if n, ok := a.next(); n != 1 || !ok { + t.Fatalf("expected to allocate between 1 and 2, got %d, %v", n, ok) + } + if n, ok := a.next(); n != 2 || !ok { + t.Fatalf("expected to allocate between 1 and 2, got %d, %v", n, ok) + } + if _, ok := a.next(); ok { + t.Fatalf("expected not to allocate outside of 1 and 2") + } +} + +func TestAllocatorStringShouldIncludeAllocatedRanges(t *testing.T) { + a := newAllocator(1, 10) + a.reserve(1) + a.reserve(2) + a.reserve(3) + a.reserve(5) + a.reserve(6) + a.reserve(8) + a.reserve(10) + + if want, got := "allocator[1..10] 1..3 5..6 8 10", a.String(); want != got { + t.Fatalf("expected String of %q, got %q", want, got) + } +} + +func TestAllocatorShouldReuseReleased(t *testing.T) { + a := newAllocator(1, 2) + + first, _ := a.next() + if want, got := 1, first; want != got { + t.Fatalf("expected allocation to be %d, got: %d", want, got) + } + 
+ second, _ := a.next() + if want, got := 2, second; want != got { + t.Fatalf("expected allocation to be %d, got: %d", want, got) + } + + a.release(first) + + third, _ := a.next() + if want, got := first, third; want != got { + t.Fatalf("expected third allocation to be %d, got: %d", want, got) + } + + _, ok := a.next() + if want, got := false, ok; want != got { + t.Fatalf("expected fourth allocation to saturate the pool") + } +} + +func TestAllocatorReleasesKeepUpWithAllocationsForAllSizes(t *testing.T) { + const runs = 5 + const max = 13 + + for lim := 1; lim < 2<= lim { // fills the allocator + a.release(int(rand.Int63n(int64(lim)))) + } + if _, ok := a.next(); !ok { + t.Fatalf("expected %d runs of random release of size %d not to fail on allocation %d", runs, lim, i) + } + } + } +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/auth.go b/services/templeton/vendor/src/github.com/streadway/amqp/auth.go new file mode 100644 index 000000000..bff7d7948 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/auth.go @@ -0,0 +1,44 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" +) + +// Authentication interface provides a means for different SASL authentication +// mechanisms to be used during connection tuning. +type Authentication interface { + Mechanism() string + Response() string +} + +// PlainAuth is a similar to Basic Auth in HTTP. +type PlainAuth struct { + Username string + Password string +} + +func (me *PlainAuth) Mechanism() string { + return "PLAIN" +} + +func (me *PlainAuth) Response() string { + return fmt.Sprintf("\000%s\000%s", me.Username, me.Password) +} + +// Finds the first mechanism preferred by the client that the server supports. +func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) { + for _, auth = range client { + for _, mech := range serverMechanisms { + if auth.Mechanism() == mech { + return auth, true + } + } + } + + return +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/certs.sh b/services/templeton/vendor/src/github.com/streadway/amqp/certs.sh new file mode 100644 index 000000000..834f42242 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/certs.sh @@ -0,0 +1,159 @@ +#!/bin/sh +# +# Creates the CA, server and client certs to be used by tls_test.go +# http://www.rabbitmq.com/ssl.html +# +# Copy stdout into the const section of tls_test.go or use for RabbitMQ +# +root=$PWD/certs + +if [ -f $root/ca/serial ]; then + echo >&2 "Previous installation found" + echo >&2 "Remove $root/ca and rerun to overwrite" + exit 1 +fi + +mkdir -p $root/ca/private +mkdir -p $root/ca/certs +mkdir -p $root/server +mkdir -p $root/client + +cd $root/ca + +chmod 700 private +touch index.txt +echo 'unique_subject = no' > index.txt.attr +echo '01' > serial +echo >openssl.cnf ' +[ ca ] +default_ca = testca + +[ testca ] +dir = . 
+certificate = $dir/cacert.pem +database = $dir/index.txt +new_certs_dir = $dir/certs +private_key = $dir/private/cakey.pem +serial = $dir/serial + +default_crl_days = 7 +default_days = 3650 +default_md = sha1 + +policy = testca_policy +x509_extensions = certificate_extensions + +[ testca_policy ] +commonName = supplied +stateOrProvinceName = optional +countryName = optional +emailAddress = optional +organizationName = optional +organizationalUnitName = optional + +[ certificate_extensions ] +basicConstraints = CA:false + +[ req ] +default_bits = 2048 +default_keyfile = ./private/cakey.pem +default_md = sha1 +prompt = yes +distinguished_name = root_ca_distinguished_name +x509_extensions = root_ca_extensions + +[ root_ca_distinguished_name ] +commonName = hostname + +[ root_ca_extensions ] +basicConstraints = CA:true +keyUsage = keyCertSign, cRLSign + +[ client_ca_extensions ] +basicConstraints = CA:false +keyUsage = digitalSignature +extendedKeyUsage = 1.3.6.1.5.5.7.3.2 + +[ server_ca_extensions ] +basicConstraints = CA:false +keyUsage = keyEncipherment +extendedKeyUsage = 1.3.6.1.5.5.7.3.1 +subjectAltName = @alt_names + +[ alt_names ] +IP.1 = 127.0.0.1 +' + +openssl req \ + -x509 \ + -nodes \ + -config openssl.cnf \ + -newkey rsa:2048 \ + -days 3650 \ + -subj "/CN=MyTestCA/" \ + -out cacert.pem \ + -outform PEM + +openssl x509 \ + -in cacert.pem \ + -out cacert.cer \ + -outform DER + +openssl genrsa -out $root/server/key.pem 2048 +openssl genrsa -out $root/client/key.pem 2048 + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=server/" \ + -key $root/server/key.pem \ + -out $root/server/req.pem \ + -outform PEM + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=client/" \ + -key $root/client/key.pem \ + -out $root/client/req.pem \ + -outform PEM + +openssl ca \ + -config openssl.cnf \ + -in $root/server/req.pem \ + -out $root/server/cert.pem \ + -notext \ + -batch \ + -extensions server_ca_extensions + +openssl ca \ + -config openssl.cnf \ + -in $root/client/req.pem \ + -out $root/client/cert.pem \ + -notext \ + -batch \ + -extensions client_ca_extensions + +cat <<-END +const caCert = \` +`cat $root/ca/cacert.pem` +\` + +const serverCert = \` +`cat $root/server/cert.pem` +\` + +const serverKey = \` +`cat $root/server/key.pem` +\` + +const clientCert = \` +`cat $root/client/cert.pem` +\` + +const clientKey = \` +`cat $root/client/key.pem` +\` +END diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/channel.go b/services/templeton/vendor/src/github.com/streadway/amqp/channel.go new file mode 100644 index 000000000..8976fa905 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/channel.go @@ -0,0 +1,1557 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "reflect" + "sync" +) + +// 0 1 3 7 size+7 size+8 +// +------+---------+-------------+ +------------+ +-----------+ +// | type | channel | size | | payload | | frame-end | +// +------+---------+-------------+ +------------+ +-----------+ +// octet short long size octets octet +const frameHeaderSize = 1 + 2 + 4 + 1 + +/* +Channel represents an AMQP channel. Used as a context for valid message +exchange. Errors on methods with this Channel as a receiver means this channel +should be discarded and a new channel established. 
+ +*/ +type Channel struct { + destructor sync.Once + sendM sync.Mutex // sequence channel frames + m sync.Mutex // struct field mutex + + connection *Connection + + rpc chan message + consumers *consumers + + id uint16 + + // true when we will never notify again + noNotify bool + + // Channel and Connection exceptions will be broadcast on these listeners. + closes []chan *Error + + // Listeners for active=true flow control. When true is sent to a listener, + // publishing should pause until false is sent to listeners. + flows []chan bool + + // Listeners for returned publishings for unroutable messages on mandatory + // publishings or undeliverable messages on immediate publishings. + returns []chan Return + + // Listeners for when the server notifies the client that + // a consumer has been cancelled. + cancels []chan string + + // Allocated when in confirm mode in order to track publish counter and order confirms + confirms *confirms + confirming bool + + // Selects on any errors from shutdown during RPC + errors chan *Error + + // State machine that manages frame order, must only be mutated by the connection + recv func(*Channel, frame) error + + // State that manages the send behavior after before and after shutdown, must + // only be mutated in shutdown() + send func(*Channel, message) error + + // Current state for frame re-assembly, only mutated from recv + message messageWithContent + header *headerFrame + body []byte +} + +// Constructs a new channel with the given framing rules +func newChannel(c *Connection, id uint16) *Channel { + return &Channel{ + connection: c, + id: id, + rpc: make(chan message), + consumers: makeConsumers(), + confirms: newConfirms(), + recv: (*Channel).recvMethod, + send: (*Channel).sendOpen, + errors: make(chan *Error, 1), + } +} + +// shutdown is called by Connection after the channel has been removed from the +// connection registry. +func (me *Channel) shutdown(e *Error) { + me.destructor.Do(func() { + me.m.Lock() + defer me.m.Unlock() + + // Broadcast abnormal shutdown + if e != nil { + for _, c := range me.closes { + c <- e + } + } + + me.send = (*Channel).sendClosed + + // Notify RPC if we're selecting + if e != nil { + me.errors <- e + } + + me.consumers.closeAll() + + for _, c := range me.closes { + close(c) + } + + for _, c := range me.flows { + close(c) + } + + for _, c := range me.returns { + close(c) + } + + for _, c := range me.cancels { + close(c) + } + + if me.confirms != nil { + me.confirms.Close() + } + + me.noNotify = true + }) +} + +func (me *Channel) open() error { + return me.call(&channelOpen{}, &channelOpenOk{}) +} + +// Performs a request/response call for when the message is not NoWait and is +// specified as Synchronous. +func (me *Channel) call(req message, res ...message) error { + if err := me.send(me, req); err != nil { + return err + } + + if req.wait() { + select { + case e := <-me.errors: + return e + + case msg := <-me.rpc: + if msg != nil { + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } else { + // RPC channel has been closed without an error, likely due to a hard + // error on the Connection. This indicates we have already been + // shutdown and if were waiting, will have returned from the errors chan. 
+ return ErrClosed + } + } + } + + return nil +} + +func (me *Channel) sendClosed(msg message) (err error) { + me.sendM.Lock() + defer me.sendM.Unlock() + + // After a 'channel.close' is sent or received the only valid response is + // channel.close-ok + if _, ok := msg.(*channelCloseOk); ok { + return me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: msg, + }) + } + + return ErrClosed +} + +func (me *Channel) sendOpen(msg message) (err error) { + me.sendM.Lock() + defer me.sendM.Unlock() + + if content, ok := msg.(messageWithContent); ok { + props, body := content.getContent() + class, _ := content.id() + + // catch client max frame size==0 and server max frame size==0 + // set size to length of what we're trying to publish + var size int + if me.connection.Config.FrameSize > 0 { + size = me.connection.Config.FrameSize - frameHeaderSize + } else { + size = len(body) + } + + if err = me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: content, + }); err != nil { + return + } + + if err = me.connection.send(&headerFrame{ + ChannelId: me.id, + ClassId: class, + Size: uint64(len(body)), + Properties: props, + }); err != nil { + return + } + + // chunk body into size (max frame size - frame header size) + for i, j := 0, size; i < len(body); i, j = j, j+size { + if j > len(body) { + j = len(body) + } + + if err = me.connection.send(&bodyFrame{ + ChannelId: me.id, + Body: body[i:j], + }); err != nil { + return + } + } + } else { + err = me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: msg, + }) + } + + return +} + +// Eventually called via the state machine from the connection's reader +// goroutine, so assumes serialized access. +func (me *Channel) dispatch(msg message) { + switch m := msg.(type) { + case *channelClose: + me.connection.closeChannel(me, newError(m.ReplyCode, m.ReplyText)) + me.send(me, &channelCloseOk{}) + + case *channelFlow: + for _, c := range me.flows { + c <- m.Active + } + me.send(me, &channelFlowOk{Active: m.Active}) + + case *basicCancel: + for _, c := range me.cancels { + c <- m.ConsumerTag + } + me.send(me, &basicCancelOk{ConsumerTag: m.ConsumerTag}) + + case *basicReturn: + ret := newReturn(*m) + for _, c := range me.returns { + c <- *ret + } + + case *basicAck: + if me.confirming { + if m.Multiple { + me.confirms.Multiple(Confirmation{m.DeliveryTag, true}) + } else { + me.confirms.One(Confirmation{m.DeliveryTag, true}) + } + } + + case *basicNack: + if me.confirming { + if m.Multiple { + me.confirms.Multiple(Confirmation{m.DeliveryTag, false}) + } else { + me.confirms.One(Confirmation{m.DeliveryTag, false}) + } + } + + case *basicDeliver: + me.consumers.send(m.ConsumerTag, newDelivery(me, m)) + // TODO log failed consumer and close channel, this can happen when + // deliveries are in flight and a no-wait cancel has happened + + default: + me.rpc <- msg + } +} + +func (me *Channel) transition(f func(*Channel, frame) error) error { + me.recv = f + return nil +} + +func (me *Channel) recvMethod(f frame) error { + switch frame := f.(type) { + case *methodFrame: + if msg, ok := frame.Method.(messageWithContent); ok { + me.body = make([]byte, 0) + me.message = msg + return me.transition((*Channel).recvHeader) + } + + me.dispatch(frame.Method) // termination state + return me.transition((*Channel).recvMethod) + + case *headerFrame: + // drop + return me.transition((*Channel).recvMethod) + + case *bodyFrame: + // drop + return me.transition((*Channel).recvMethod) + + default: + panic("unexpected frame type") + } + + panic("unreachable") 
+}
+
+func (me *Channel) recvHeader(f frame) error {
+ switch frame := f.(type) {
+ case *methodFrame:
+ // interrupt content and handle method
+ return me.recvMethod(f)
+
+ case *headerFrame:
+ // start collecting if we expect body frames
+ me.header = frame
+
+ if frame.Size == 0 {
+ me.message.setContent(me.header.Properties, me.body)
+ me.dispatch(me.message) // termination state
+ return me.transition((*Channel).recvMethod)
+ } else {
+ return me.transition((*Channel).recvContent)
+ }
+
+ case *bodyFrame:
+ // drop and reset
+ return me.transition((*Channel).recvMethod)
+
+ default:
+ panic("unexpected frame type")
+ }
+
+ panic("unreachable")
+}
+
+// state after method + header and before the length
+// defined by the header has been reached
+func (me *Channel) recvContent(f frame) error {
+ switch frame := f.(type) {
+ case *methodFrame:
+ // interrupt content and handle method
+ return me.recvMethod(f)
+
+ case *headerFrame:
+ // drop and reset
+ return me.transition((*Channel).recvMethod)
+
+ case *bodyFrame:
+ me.body = append(me.body, frame.Body...)
+
+ if uint64(len(me.body)) >= me.header.Size {
+ me.message.setContent(me.header.Properties, me.body)
+ me.dispatch(me.message) // termination state
+ return me.transition((*Channel).recvMethod)
+ }
+
+ return me.transition((*Channel).recvContent)
+
+ default:
+ panic("unexpected frame type")
+ }
+
+ panic("unreachable")
+}
+
+/*
+Close initiates a clean channel closure by sending a close message with the error
+code set to '200'.
+
+It is safe to call this method multiple times.
+
+*/
+func (me *Channel) Close() error {
+ defer me.connection.closeChannel(me, nil)
+ return me.call(
+ &channelClose{ReplyCode: replySuccess},
+ &channelCloseOk{},
+ )
+}
+
+/*
+NotifyClose registers a listener for when the server sends a channel or
+connection exception in the form of a Connection.Close or Channel.Close method.
+Connection exceptions will be broadcast to all open channels and all channels
+will be closed, where channel exceptions will only be broadcast to listeners to
+this channel.
+
+The chan provided will be closed when the Channel is closed and on a
+graceful close, no error will be sent.
+
+*/
+func (me *Channel) NotifyClose(c chan *Error) chan *Error {
+ me.m.Lock()
+ defer me.m.Unlock()
+
+ if me.noNotify {
+ close(c)
+ } else {
+ me.closes = append(me.closes, c)
+ }
+
+ return c
+}
+
+/*
+NotifyFlow registers a listener for basic.flow methods sent by the server.
+When `true` is sent on one of the listener channels, all publishers should
+pause until a `false` is sent.
+
+The server may ask the producer to pause or restart the flow of Publishings
+sent on a channel. This is a simple flow-control mechanism that a server can
+use to avoid overflowing its queues or otherwise finding itself receiving more
+messages than it can process. Note that this method is not intended for window
+control. It does not affect contents returned by basic.get-ok methods.
+
+When a new channel is opened, it is active (flow is active). Some
+applications assume that channels are inactive until started. To emulate
+this behavior a client MAY open the channel, then pause it.
+
+Publishers should respond to flow messages as rapidly as possible, and the
+server may disconnect overproducing channels that do not respect these
+messages.
+
+basic.flow-ok methods will always be returned to the server regardless of
+the number of listeners there are.
+
+To control the flow of deliveries from the server, use the Channel.Flow()
+method instead.
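+
+An illustrative listener sketch, assuming ch is an open *Channel:
+
+    go func() {
+        for active := range ch.NotifyFlow(make(chan bool)) {
+            if active {
+                // the server asked us to pause publishing
+            } else {
+                // the server asked us to resume publishing
+            }
+        }
+    }()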
+
+Note: RabbitMQ prefers to use TCP pushback on the network connection instead
+of sending basic.flow. This means that if a single channel is producing too
+much on the same connection, all channels using that connection will suffer,
+including acknowledgments from deliveries. Use different Connections if you
+desire to interleave consumers and producers in the same process, to keep your
+basic.ack messages from being rate limited along with your basic.publish
+messages.
+
+*/
+func (me *Channel) NotifyFlow(c chan bool) chan bool {
+ me.m.Lock()
+ defer me.m.Unlock()
+
+ if me.noNotify {
+ close(c)
+ } else {
+ me.flows = append(me.flows, c)
+ }
+
+ return c
+}
+
+/*
+NotifyReturn registers a listener for basic.return methods. These can be sent
+from the server when a publish is undeliverable either from the mandatory or
+immediate flags.
+
+A return struct has a copy of the Publishing along with some error
+information about why the publishing failed.
+
+*/
+func (me *Channel) NotifyReturn(c chan Return) chan Return {
+ me.m.Lock()
+ defer me.m.Unlock()
+
+ if me.noNotify {
+ close(c)
+ } else {
+ me.returns = append(me.returns, c)
+ }
+
+ return c
+}
+
+/*
+NotifyCancel registers a listener for basic.cancel methods. These can be sent
+from the server when a queue is deleted or when consuming from a mirrored queue
+where the master has just failed (and was moved to another node).
+
+The subscription tag is returned to the listener.
+
+*/
+func (me *Channel) NotifyCancel(c chan string) chan string {
+ me.m.Lock()
+ defer me.m.Unlock()
+
+ if me.noNotify {
+ close(c)
+ } else {
+ me.cancels = append(me.cancels, c)
+ }
+
+ return c
+}
+
+/*
+NotifyConfirm calls NotifyPublish and starts a goroutine that sends
+ordered Ack and Nack DeliveryTags to the respective channels.
+
+For strict ordering, use NotifyPublish instead.
+*/
+func (me *Channel) NotifyConfirm(ack, nack chan uint64) (chan uint64, chan uint64) {
+ confirms := me.NotifyPublish(make(chan Confirmation, len(ack)+len(nack)))
+
+ go func() {
+ for c := range confirms {
+ if c.Ack {
+ ack <- c.DeliveryTag
+ } else {
+ nack <- c.DeliveryTag
+ }
+ }
+ close(ack)
+ if nack != ack {
+ close(nack)
+ }
+ }()
+
+ return ack, nack
+}
+
+/*
+NotifyPublish registers a listener for reliable publishing. Receives from this
+chan for every publish after Channel.Confirm will be in order starting with
+DeliveryTag 1.
+
+There will be one and only one Confirmation for each Publishing, starting with
+the delivery tag of 1 and progressing sequentially until the total number of
+Publishings has been seen by the server.
+
+Acknowledgments will be received in the order of delivery from the
+NotifyPublish channels even if the server acknowledges them out of order.
+
+The listener chan will be closed when the Channel is closed.
+
+The capacity of the chan Confirmation must be at least as large as the
+number of outstanding publishings. Not having enough buffer capacity will
+create a deadlock if you attempt to perform other operations on the Connection
+or Channel while confirms are in-flight.
+
+It's advisable to wait for all Confirmations to arrive before calling
+Channel.Close() or Connection.Close().
+
+*/
+func (me *Channel) NotifyPublish(confirm chan Confirmation) chan Confirmation {
+ me.m.Lock()
+ defer me.m.Unlock()
+
+ if me.noNotify {
+ close(confirm)
+ } else {
+ me.confirms.Listen(confirm)
+ }
+
+ return confirm
+}
+
+/*
+Qos controls how many messages or how many bytes the server will try to keep on
+the network for consumers before receiving delivery acks.
The intent of Qos is
+to make sure the network buffers stay full between the server and client.
+
+With a prefetch count greater than zero, the server will deliver that many
+messages to consumers before acknowledgments are received. The server ignores
+this option when consumers are started with noAck because no acknowledgments
+are expected or sent.
+
+With a prefetch size greater than zero, the server will try to keep at least
+that many bytes of deliveries flushed to the network before receiving
+acknowledgments from the consumers. This option is ignored when consumers are
+started with noAck.
+
+When global is true, these Qos settings apply to all existing and future
+consumers on all channels on the same connection. When false, the Channel.Qos
+settings will apply to all existing and future consumers on this channel.
+RabbitMQ does not implement the global flag.
+
+To get round-robin behavior between consumers consuming from the same queue on
+different connections, set the prefetch count to 1, and the next available
+message on the server will be delivered to the next available consumer.
+
+If your consumer work time is reasonably consistent and not much greater
+than two times your network round trip time, you will see significant
+throughput improvements starting with a prefetch count of 2 or slightly
+greater as described by benchmarks on RabbitMQ.
+
+http://www.rabbitmq.com/blog/2012/04/25/rabbitmq-performance-measurements-part-2/
+*/
+func (me *Channel) Qos(prefetchCount, prefetchSize int, global bool) error {
+ return me.call(
+ &basicQos{
+ PrefetchCount: uint16(prefetchCount),
+ PrefetchSize: uint32(prefetchSize),
+ Global: global,
+ },
+ &basicQosOk{},
+ )
+}
+
+/*
+Cancel stops deliveries to the consumer chan established in Channel.Consume and
+identified by consumer.
+
+Only use this method to cleanly stop receiving deliveries from the server and
+cleanly shut down the consumer chan identified by this tag. Using this method
+and waiting for remaining messages to flush from the consumer chan will ensure
+all messages received on the network will be delivered to the receiver of your
+consumer chan.
+
+Continue consuming from the chan Delivery provided by Channel.Consume until the
+chan closes.
+
+When noWait is true, do not wait for the server to acknowledge the cancel.
+Only use this when you are certain there are no deliveries requiring
+acknowledgment in flight; otherwise, they will arrive and be dropped in the
+client without an ack and will not be redelivered to other consumers.
+
+*/
+func (me *Channel) Cancel(consumer string, noWait bool) error {
+ req := &basicCancel{
+ ConsumerTag: consumer,
+ NoWait: noWait,
+ }
+ res := &basicCancelOk{}
+
+ if err := me.call(req, res); err != nil {
+ return err
+ }
+
+ if req.wait() {
+ me.consumers.close(res.ConsumerTag)
+ } else {
+ // Potentially could drop deliveries in flight
+ me.consumers.close(consumer)
+ }
+
+ return nil
+}
+
+/*
+QueueDeclare declares a queue to hold messages and deliver to consumers.
+Declaring creates a queue if it doesn't already exist, or ensures that an
+existing queue matches the same parameters.
+
+Every queue declared gets a default binding to the empty exchange "" which has
+the type "direct" with the routing key matching the queue's name. With this
+default binding, it is possible to publish messages that route directly to
+this queue by publishing to "" with the routing key of the queue name.
+
+ QueueDeclare("alerts", true, false, false, false, nil)
+ Publish("", "alerts", false, false, Publishing{Body: []byte("...")})
+
+ Delivery Exchange Key Queue
+ -----------------------------------------------
+ key: alerts -> "" -> alerts -> alerts
+
+The queue name may be empty, in which case the server will generate a unique name
+which will be returned in the Name field of the Queue struct.
+
+Durable and Non-Auto-Deleted queues will survive server restarts and remain
+when there are no remaining consumers or bindings. Persistent publishings will
+be restored in this queue on server restart. These queues are only able to be
+bound to durable exchanges.
+
+Non-Durable and Auto-Deleted queues will not be redeclared on server restart
+and will be deleted by the server after a short time when the last consumer is
+canceled or the last consumer's channel is closed. Queues with this lifetime
+can also be deleted normally with QueueDelete. These non-durable queues can only
+be bound to non-durable exchanges.
+
+Non-Durable and Non-Auto-Deleted queues will remain declared as long as the
+server is running regardless of how many consumers there are. This lifetime is
+useful for temporary topologies that may have long delays between consumer
+activity. These queues can only be bound to non-durable exchanges.
+
+Durable and Auto-Deleted queues will be restored on server restart, but without
+active consumers, will not survive and will be removed. This lifetime is unlikely
+to be useful.
+
+Exclusive queues are only accessible by the connection that declares them and
+will be deleted when the connection closes. Channels on other connections
+will receive an error when attempting to declare, bind, consume, purge or delete a
+queue with the same name.
+
+When noWait is true, the queue will be assumed to be declared on the server. A
+channel exception will arrive if the conditions are met for existing queues
+or attempting to modify an existing queue from a different connection.
+
+When the error return value is not nil, you can assume the queue could not be
+declared with these parameters and the channel will be closed.
+
+*/
+func (me *Channel) QueueDeclare(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) {
+ if err := args.Validate(); err != nil {
+ return Queue{}, err
+ }
+
+ req := &queueDeclare{
+ Queue: name,
+ Passive: false,
+ Durable: durable,
+ AutoDelete: autoDelete,
+ Exclusive: exclusive,
+ NoWait: noWait,
+ Arguments: args,
+ }
+ res := &queueDeclareOk{}
+
+ if err := me.call(req, res); err != nil {
+ return Queue{}, err
+ }
+
+ if req.wait() {
+ return Queue{
+ Name: res.Queue,
+ Messages: int(res.MessageCount),
+ Consumers: int(res.ConsumerCount),
+ }, nil
+ }
+
+ return Queue{
+ Name: name,
+ }, nil
+
+ panic("unreachable")
+}
+
+/*
+
+QueueDeclarePassive is functionally and parametrically equivalent to
+QueueDeclare, except that it sets the "passive" attribute to true. A passive
+queue is assumed by RabbitMQ to already exist, and attempting to connect to a
+non-existent queue will cause RabbitMQ to throw an exception. This function
+can be used to test for the existence of a queue.
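+
+An illustrative existence check, assuming ch is an open *Channel and a
+hypothetical queue named "work":
+
+    if _, err := ch.QueueDeclarePassive("work", true, false, false, false, nil); err != nil {
+        // the queue does not exist and this channel is now closed
+    }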
+
+*/
+func (me *Channel) QueueDeclarePassive(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) {
+ if err := args.Validate(); err != nil {
+ return Queue{}, err
+ }
+
+ req := &queueDeclare{
+ Queue: name,
+ Passive: true,
+ Durable: durable,
+ AutoDelete: autoDelete,
+ Exclusive: exclusive,
+ NoWait: noWait,
+ Arguments: args,
+ }
+ res := &queueDeclareOk{}
+
+ if err := me.call(req, res); err != nil {
+ return Queue{}, err
+ }
+
+ if req.wait() {
+ return Queue{
+ Name: res.Queue,
+ Messages: int(res.MessageCount),
+ Consumers: int(res.ConsumerCount),
+ }, nil
+ }
+
+ return Queue{
+ Name: name,
+ }, nil
+
+ panic("unreachable")
+}
+
+/*
+QueueInspect passively declares a queue by name to inspect the current message
+count and consumer count.
+
+Use this method to check how many unacknowledged messages reside in the queue,
+how many consumers are receiving deliveries, and whether a queue by this
+name already exists.
+
+If the queue by this name exists, use Channel.QueueDeclare to check whether it is
+declared with specific parameters.
+
+If a queue by this name does not exist, an error will be returned and the
+channel will be closed.
+
+*/
+func (me *Channel) QueueInspect(name string) (Queue, error) {
+ req := &queueDeclare{
+ Queue: name,
+ Passive: true,
+ }
+ res := &queueDeclareOk{}
+
+ err := me.call(req, res)
+
+ state := Queue{
+ Name: name,
+ Messages: int(res.MessageCount),
+ Consumers: int(res.ConsumerCount),
+ }
+
+ return state, err
+}
+
+/*
+QueueBind binds an exchange to a queue so that publishings to the exchange will
+be routed to the queue when the publishing routing key matches the binding
+routing key.
+
+ QueueBind("pagers", "alert", "log", false, nil)
+ QueueBind("emails", "info", "log", false, nil)
+
+ Delivery Exchange Key Queue
+ -----------------------------------------------
+ key: alert --> log ----> alert --> pagers
+ key: info ---> log ----> info ---> emails
+ key: debug --> log (none) (dropped)
+
+If a binding with the same key and arguments already exists between the
+exchange and queue, the attempt to rebind will be ignored and the existing
+binding will be retained.
+
+In the case that multiple bindings may cause the message to be routed to the
+same queue, the server will only route the publishing once. This is possible
+with topic exchanges.
+
+ QueueBind("pagers", "alert", "amq.topic", false, nil)
+ QueueBind("emails", "info", "amq.topic", false, nil)
+ QueueBind("emails", "#", "amq.topic", false, nil) // match everything
+
+ Delivery Exchange Key Queue
+ -----------------------------------------------
+ key: alert --> amq.topic ----> alert --> pagers
+ key: info ---> amq.topic ----> # ------> emails
+ \---> info ---/
+ key: debug --> amq.topic ----> # ------> emails
+
+It is only possible to bind a durable queue to a durable exchange regardless of
+whether the queue or exchange is auto-deleted. Bindings between durable queues
+and exchanges will also be restored on server restart.
+
+If the binding could not complete, an error will be returned and the channel
+will be closed.
+
+When noWait is true and the queue could not be bound, the channel will be
+closed with an error.
+
+*/
+func (me *Channel) QueueBind(name, key, exchange string, noWait bool, args Table) error {
+ if err := args.Validate(); err != nil {
+ return err
+ }
+
+ return me.call(
+ &queueBind{
+ Queue: name,
+ Exchange: exchange,
+ RoutingKey: key,
+ NoWait: noWait,
+ Arguments: args,
+ },
+ &queueBindOk{},
+ )
+}
+
+/*
+QueueUnbind removes a binding between an exchange and queue matching the key and
+arguments.
+
+It is possible to send an empty string for the exchange name, which means to
+unbind the queue from the default exchange.
+
+*/
+func (me *Channel) QueueUnbind(name, key, exchange string, args Table) error {
+ if err := args.Validate(); err != nil {
+ return err
+ }
+
+ return me.call(
+ &queueUnbind{
+ Queue: name,
+ Exchange: exchange,
+ RoutingKey: key,
+ Arguments: args,
+ },
+ &queueUnbindOk{},
+ )
+}
+
+/*
+QueuePurge removes all messages from the named queue which are not waiting to
+be acknowledged. Messages that have been delivered but have not yet been
+acknowledged will not be removed.
+
+When successful, returns the number of messages purged.
+
+If noWait is true, do not wait for the server response and the number of
+messages purged will not be meaningful.
+*/
+func (me *Channel) QueuePurge(name string, noWait bool) (int, error) {
+ req := &queuePurge{
+ Queue: name,
+ NoWait: noWait,
+ }
+ res := &queuePurgeOk{}
+
+ err := me.call(req, res)
+
+ return int(res.MessageCount), err
+}
+
+/*
+QueueDelete removes the queue from the server including all bindings then
+purges the messages based on server configuration, returning the number of
+messages purged.
+
+When ifUnused is true, the queue will not be deleted if there are any
+consumers on the queue. If there are consumers, an error will be returned and
+the channel will be closed.
+
+When ifEmpty is true, the queue will not be deleted if there are any messages
+remaining on the queue. If there are messages, an error will be returned and
+the channel will be closed.
+
+When noWait is true, the queue will be deleted without waiting for a response
+from the server. The purged message count will not be meaningful. If the queue
+could not be deleted, a channel exception will be raised and the channel will
+be closed.
+
+*/
+func (me *Channel) QueueDelete(name string, ifUnused, ifEmpty, noWait bool) (int, error) {
+ req := &queueDelete{
+ Queue: name,
+ IfUnused: ifUnused,
+ IfEmpty: ifEmpty,
+ NoWait: noWait,
+ }
+ res := &queueDeleteOk{}
+
+ err := me.call(req, res)
+
+ return int(res.MessageCount), err
+}
+
+/*
+Consume immediately starts delivering queued messages.
+
+Begin receiving on the returned chan Delivery before any other operation on the
+Connection or Channel.
+
+Continues deliveries to the returned chan Delivery until Channel.Cancel,
+Connection.Close, Channel.Close, or an AMQP exception occurs. Consumers must
+range over the chan to ensure all deliveries are received. Unreceived
+deliveries will block all methods on the same connection.
+
+All deliveries in AMQP must be acknowledged. It is expected of the consumer to
+call Delivery.Ack after it has successfully processed the delivery. If the
+consumer is cancelled or the channel or connection is closed any unacknowledged
+deliveries will be requeued at the end of the same queue.
+
+The consumer is identified by a string that is unique and scoped for all
+consumers on this channel. If you wish to eventually cancel the consumer, use
+the same non-empty identifier in Channel.Cancel. An empty string will cause
+the library to generate a unique identity.
The consumer identity will be
+included in every Delivery in the ConsumerTag field.
+
+When autoAck (also known as noAck) is true, the server will acknowledge
+deliveries to this consumer prior to writing the delivery to the network. When
+autoAck is true, the consumer should not call Delivery.Ack. Automatically
+acknowledging deliveries means that some deliveries may get lost if the
+consumer is unable to process them after the server delivers them.
+
+When exclusive is true, the server will ensure that this is the sole consumer
+from this queue. When exclusive is false, the server will fairly distribute
+deliveries across multiple consumers.
+
+When noLocal is true, the server will not deliver publishings sent from the same
+connection to this consumer. It's advisable to use separate connections for
+Channel.Publish and Channel.Consume so as not to have TCP pushback on publishing
+affect the ability to consume messages, so this parameter is here mostly for
+completeness.
+
+When noWait is true, do not wait for the server to confirm the request and
+immediately begin deliveries. If it is not possible to consume, a channel
+exception will be raised and the channel will be closed.
+
+Optional arguments can be provided that have specific semantics for the queue
+or server.
+
+When the channel or connection closes, all delivery chans will also close.
+
+Deliveries on the returned chan will be buffered indefinitely. To limit memory
+of this buffer, use the Channel.Qos method to limit the amount of
+unacknowledged/buffered deliveries the server will deliver on this Channel.
+
+*/
+func (me *Channel) Consume(queue, consumer string, autoAck, exclusive, noLocal, noWait bool, args Table) (<-chan Delivery, error) {
+ // When we return from me.call, there may be a delivery already for the
+ // consumer that hasn't been added to the consumer hash yet. Because of
+ // this, we never rely on the server picking a consumer tag for us.
+
+ if err := args.Validate(); err != nil {
+ return nil, err
+ }
+
+ if consumer == "" {
+ consumer = uniqueConsumerTag()
+ }
+
+ req := &basicConsume{
+ Queue: queue,
+ ConsumerTag: consumer,
+ NoLocal: noLocal,
+ NoAck: autoAck,
+ Exclusive: exclusive,
+ NoWait: noWait,
+ Arguments: args,
+ }
+ res := &basicConsumeOk{}
+
+ deliveries := make(chan Delivery)
+
+ me.consumers.add(consumer, deliveries)
+
+ if err := me.call(req, res); err != nil {
+ me.consumers.close(consumer)
+ return nil, err
+ }
+
+ return (<-chan Delivery)(deliveries), nil
+}
+
+/*
+ExchangeDeclare declares an exchange on the server. If the exchange does not
+already exist, the server will create it. If the exchange exists, the server
+verifies that it is of the provided type, durability and auto-delete flags.
+
+Errors returned from this method will close the channel.
+
+Exchange names starting with "amq." are reserved for pre-declared and
+standardized exchanges. The client MAY declare an exchange starting with
+"amq." if the passive option is set, or the exchange already exists. Names can
+consist of a non-empty sequence of letters, digits, hyphen, underscore,
+period, or colon.
+
+Each exchange belongs to one of a set of exchange kinds/types implemented by
+the server. The exchange types define the functionality of the exchange - i.e.
+how messages are routed through it. Once an exchange is declared, its type
+cannot be changed. The common types are "direct", "fanout", "topic" and
+"headers".
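+
+An illustrative declaration, assuming ch is an open *Channel and a
+hypothetical fanout exchange named "logs":
+
+    err := ch.ExchangeDeclare("logs", "fanout", true, false, false, false, nil)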
+
+Durable and Non-Auto-Deleted exchanges will survive server restarts and remain
+declared when there are no remaining bindings. This is the best lifetime for
+long-lived exchange configurations like stable routes and default exchanges.
+
+Non-Durable and Auto-Deleted exchanges will be deleted when there are no
+remaining bindings and not restored on server restart. This lifetime is
+useful for temporary topologies that should not pollute the virtual host on
+failure or after the consumers have completed.
+
+Non-Durable and Non-Auto-Deleted exchanges will remain as long as the server is
+running including when there are no remaining bindings. This is useful for
+temporary topologies that may have long delays between bindings.
+
+Durable and Auto-Deleted exchanges will survive server restarts and will be
+removed before and after server restarts when there are no remaining bindings.
+These exchanges are useful for robust temporary topologies or when you require
+binding durable queues to auto-deleted exchanges.
+
+Note: RabbitMQ declares the default exchange types like 'amq.fanout' as
+durable, so queues that bind to these pre-declared exchanges must also be
+durable.
+
+Exchanges declared as `internal` do not accept publishings. Internal
+exchanges are useful for when you wish to implement inter-exchange topologies
+that should not be exposed to users of the broker.
+
+When noWait is true, declare without waiting for a confirmation from the server.
+The channel may be closed as a result of an error. Add a NotifyClose listener
+to respond to any exceptions.
+
+An optional amqp.Table of arguments that are specific to the server's
+implementation of the exchange can be sent for exchange types that require
+extra parameters.
+*/
+func (me *Channel) ExchangeDeclare(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error {
+ if err := args.Validate(); err != nil {
+ return err
+ }
+
+ return me.call(
+ &exchangeDeclare{
+ Exchange: name,
+ Type: kind,
+ Passive: false,
+ Durable: durable,
+ AutoDelete: autoDelete,
+ Internal: internal,
+ NoWait: noWait,
+ Arguments: args,
+ },
+ &exchangeDeclareOk{},
+ )
+}
+
+/*
+
+ExchangeDeclarePassive is functionally and parametrically equivalent to
+ExchangeDeclare, except that it sets the "passive" attribute to true. A passive
+exchange is assumed by RabbitMQ to already exist, and attempting to connect to a
+non-existent exchange will cause RabbitMQ to throw an exception. This function
+can be used to detect the existence of an exchange.
+
+*/
+func (me *Channel) ExchangeDeclarePassive(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error {
+ if err := args.Validate(); err != nil {
+ return err
+ }
+
+ return me.call(
+ &exchangeDeclare{
+ Exchange: name,
+ Type: kind,
+ Passive: true,
+ Durable: durable,
+ AutoDelete: autoDelete,
+ Internal: internal,
+ NoWait: noWait,
+ Arguments: args,
+ },
+ &exchangeDeclareOk{},
+ )
+}
+
+/*
+ExchangeDelete removes the named exchange from the server. When an exchange is
+deleted all queue bindings on the exchange are also deleted. If this exchange
+does not exist, the channel will be closed with an error.
+
+When ifUnused is true, the server will only delete the exchange if it has no queue
+bindings. If the exchange has queue bindings, the server does not delete it
+but closes the channel with an exception instead. Set this to true if you are
+not the sole owner of the exchange.
+
+When noWait is true, do not wait for a server confirmation that the exchange has
+been deleted. Failing to delete the exchange could close the channel. Add a
+NotifyClose listener to respond to these channel exceptions.
+*/
+func (me *Channel) ExchangeDelete(name string, ifUnused, noWait bool) error {
+ return me.call(
+ &exchangeDelete{
+ Exchange: name,
+ IfUnused: ifUnused,
+ NoWait: noWait,
+ },
+ &exchangeDeleteOk{},
+ )
+}
+
+/*
+ExchangeBind binds an exchange to another exchange to create inter-exchange
+routing topologies on the server. This can decouple the private topology and
+routing exchanges from exchanges intended solely for publishing endpoints.
+
+Binding two exchanges with identical arguments will not create duplicate
+bindings.
+
+Binding one exchange to another with multiple bindings will only deliver a
+message once. For example if you bind your exchange to `amq.fanout` with two
+different binding keys, only a single message will be delivered to your
+exchange even though multiple bindings will match.
+
+Given a message delivered to the source exchange, the message will be forwarded
+to the destination exchange when the routing key is matched.
+
+ ExchangeBind("sell", "MSFT", "trade", false, nil)
+ ExchangeBind("buy", "AAPL", "trade", false, nil)
+
+ Delivery Source Key Destination
+ example exchange exchange
+ -----------------------------------------------
+ key: AAPL --> trade ----> MSFT sell
+ \---> AAPL --> buy
+
+When noWait is true, do not wait for the server to confirm the binding. If any
+error occurs the channel will be closed. Add a listener to NotifyClose to
+handle these errors.
+
+Optional arguments specific to the exchanges bound can also be specified.
+*/
+func (me *Channel) ExchangeBind(destination, key, source string, noWait bool, args Table) error {
+ if err := args.Validate(); err != nil {
+ return err
+ }
+
+ return me.call(
+ &exchangeBind{
+ Destination: destination,
+ Source: source,
+ RoutingKey: key,
+ NoWait: noWait,
+ Arguments: args,
+ },
+ &exchangeBindOk{},
+ )
+}
+
+/*
+ExchangeUnbind unbinds the destination exchange from the source exchange on the
+server by removing the routing key between them. This is the inverse of
+ExchangeBind. If the binding does not currently exist, an error will be
+returned.
+
+When noWait is true, do not wait for the server to confirm the deletion of the
+binding. If any error occurs the channel will be closed. Add a listener to
+NotifyClose to handle these errors.
+
+Optional arguments that are specific to the type of exchanges bound can also be
+provided. These must match the same arguments specified in ExchangeBind to
+identify the binding.
+*/
+func (me *Channel) ExchangeUnbind(destination, key, source string, noWait bool, args Table) error {
+ if err := args.Validate(); err != nil {
+ return err
+ }
+
+ return me.call(
+ &exchangeUnbind{
+ Destination: destination,
+ Source: source,
+ RoutingKey: key,
+ NoWait: noWait,
+ Arguments: args,
+ },
+ &exchangeUnbindOk{},
+ )
+}
+
+/*
+Publish sends a Publishing from the client to an exchange on the server.
+
+When you want a single message to be delivered to a single queue, you can
+publish to the default exchange with the routingKey of the queue name. This is
+because every declared queue gets an implicit route to the default exchange.
+
+Since publishings are asynchronous, any undeliverable message will get returned
+by the server.
Add a listener with Channel.NotifyReturn to handle any
+undeliverable message when calling publish with either the mandatory or
+immediate parameters as true.
+
+Publishings can be undeliverable when the mandatory flag is true and no queue is
+bound that matches the routing key, or when the immediate flag is true and no
+consumer on the matched queue is ready to accept the delivery.
+
+This can return an error when the channel, connection or socket is closed. The
+error or lack of an error does not indicate whether the server has received this
+publishing.
+
+It is possible for publishing to not reach the broker if the underlying socket
+is shut down without pending publishing packets being flushed from the kernel
+buffers. The easy way of making it probable that all publishings reach the
+server is to always call Connection.Close before terminating your publishing
+application. The way to ensure that all publishings reach the server is to add
+a listener to Channel.NotifyPublish and put the channel in confirm mode with
+Channel.Confirm. Publishing delivery tags and their corresponding
+confirmations start at 1. Exit when all publishings are confirmed.
+
+When Publish does not return an error and the channel is in confirm mode, the
+internal counter for DeliveryTags with the first confirmation starts at 1.
+
+*/
+func (me *Channel) Publish(exchange, key string, mandatory, immediate bool, msg Publishing) error {
+ if err := msg.Headers.Validate(); err != nil {
+ return err
+ }
+
+ me.m.Lock()
+ defer me.m.Unlock()
+
+ if err := me.send(me, &basicPublish{
+ Exchange: exchange,
+ RoutingKey: key,
+ Mandatory: mandatory,
+ Immediate: immediate,
+ Body: msg.Body,
+ Properties: properties{
+ Headers: msg.Headers,
+ ContentType: msg.ContentType,
+ ContentEncoding: msg.ContentEncoding,
+ DeliveryMode: msg.DeliveryMode,
+ Priority: msg.Priority,
+ CorrelationId: msg.CorrelationId,
+ ReplyTo: msg.ReplyTo,
+ Expiration: msg.Expiration,
+ MessageId: msg.MessageId,
+ Timestamp: msg.Timestamp,
+ Type: msg.Type,
+ UserId: msg.UserId,
+ AppId: msg.AppId,
+ },
+ }); err != nil {
+ return err
+ }
+
+ if me.confirming {
+ me.confirms.Publish()
+ }
+
+ return nil
+}
+
+/*
+Get synchronously receives a single Delivery from the head of a queue from the
+server to the client. In almost all cases, using Channel.Consume will be
+preferred.
+
+If there was a delivery waiting on the queue and that delivery was received, the
+second return value will be true. If there was no delivery waiting or an error
+occurred, the ok bool will be false.
+
+All deliveries must be acknowledged including those from Channel.Get. Call
+Delivery.Ack on the returned delivery when you have fully processed this
+delivery.
+
+When autoAck is true, the server will automatically acknowledge this message so
+you don't have to. But if you are unable to fully process this message before
+the channel or connection is closed, the message will not get requeued.
+
+*/
+func (me *Channel) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) {
+ req := &basicGet{Queue: queue, NoAck: autoAck}
+ res := &basicGetOk{}
+ empty := &basicGetEmpty{}
+
+ if err := me.call(req, res, empty); err != nil {
+ return Delivery{}, false, err
+ }
+
+ if res.DeliveryTag > 0 {
+ return *(newDelivery(me, res)), true, nil
+ }
+
+ return Delivery{}, false, nil
+}
+
+/*
+Tx puts the channel into transaction mode on the server. All publishings and
+acknowledgments following this method will be atomically committed or rolled
+back for a single queue.
Call either Channel.TxCommit or Channel.TxRollback to
+leave this transaction and immediately start a new transaction.
+
+The atomicity across multiple queues is not defined as queue declarations and
+bindings are not included in the transaction.
+
+The behavior of publishings that are delivered as mandatory or immediate while
+the channel is in a transaction is not defined.
+
+Once a channel has been put into transaction mode, it cannot be taken out of
+transaction mode. Use a different channel for non-transactional semantics.
+
+*/
+func (me *Channel) Tx() error {
+ return me.call(
+ &txSelect{},
+ &txSelectOk{},
+ )
+}
+
+/*
+TxCommit atomically commits all publishings and acknowledgments for a single
+queue and immediately starts a new transaction.
+
+Calling this method without having called Channel.Tx is an error.
+
+*/
+func (me *Channel) TxCommit() error {
+ return me.call(
+ &txCommit{},
+ &txCommitOk{},
+ )
+}
+
+/*
+TxRollback atomically rolls back all publishings and acknowledgments for a
+single queue and immediately starts a new transaction.
+
+Calling this method without having called Channel.Tx is an error.
+
+*/
+func (me *Channel) TxRollback() error {
+ return me.call(
+ &txRollback{},
+ &txRollbackOk{},
+ )
+}
+
+/*
+Flow pauses the delivery of messages to consumers on this channel. Channels
+are opened with flow control not active; to open a channel with paused
+deliveries, immediately call this method with true after calling
+Connection.Channel.
+
+When active is true, this method asks the server to temporarily pause deliveries
+until called again with active as false.
+
+Channel.Get methods will not be affected by flow control.
+
+This method is not intended to act as window control. Use Channel.Qos to limit
+the number of unacknowledged messages or bytes in flight instead.
+
+The server may also send us flow methods to throttle our publishings. A well
+behaving publishing client should add a listener with Channel.NotifyFlow and
+pause its publishings when true is sent on that channel.
+
+Note: RabbitMQ prefers to use TCP push back to control flow for all channels on
+a connection, so under high volume scenarios, it's wise to open separate
+Connections for publishings and deliveries.
+
+*/
+func (me *Channel) Flow(active bool) error {
+ return me.call(
+ &channelFlow{Active: active},
+ &channelFlowOk{},
+ )
+}
+
+/*
+Confirm puts this channel into confirm mode so that the client can ensure all
+publishings have successfully been received by the server. After entering this
+mode, the server will send a basic.ack or basic.nack message with the delivery
+tag set to a 1-based incrementing index corresponding to every publishing
+received after this method returns.
+
+Add a listener to Channel.NotifyPublish to respond to the Confirmations. If
+Channel.NotifyPublish is not called, the Confirmations will be silently
+ignored.
+
+The order of acknowledgments is not bound to the order of deliveries.
+
+Ack and Nack confirmations will arrive at some point in the future.
+
+Unroutable mandatory or immediate messages are acknowledged immediately after
+any Channel.NotifyReturn listeners have been notified. Other messages are
+acknowledged when all queues that should have the message routed to them have
+either received acknowledgment of delivery or have enqueued the message,
+persisting the message if necessary.
+
+When noWait is true, the client will not wait for a response. A channel
+exception could occur if the server does not support this method.
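+
+An illustrative confirmed publish, assuming ch is an open *Channel and a
+hypothetical queue named "work":
+
+    confirms := ch.NotifyPublish(make(chan Confirmation, 1))
+    if err := ch.Confirm(false); err != nil {
+        // the server does not support publisher confirms
+    }
+    ch.Publish("", "work", false, false, Publishing{Body: []byte("job")})
+    if confirmed := <-confirms; !confirmed.Ack {
+        // the server could not enqueue the publishing
+    }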
+
+*/
+func (me *Channel) Confirm(noWait bool) error {
+ me.m.Lock()
+ defer me.m.Unlock()
+
+ if err := me.call(
+ &confirmSelect{Nowait: noWait},
+ &confirmSelectOk{},
+ ); err != nil {
+ return err
+ }
+
+ me.confirming = true
+
+ return nil
+}
+
+/*
+Recover redelivers all unacknowledged deliveries on this channel.
+
+When requeue is false, messages will be redelivered to the original consumer.
+
+When requeue is true, messages will be redelivered to any available consumer,
+potentially including the original.
+
+If the deliveries cannot be recovered, an error will be returned and the channel
+will be closed.
+
+Note: this method is not implemented on RabbitMQ, use Delivery.Nack instead
+*/
+func (me *Channel) Recover(requeue bool) error {
+ return me.call(
+ &basicRecover{Requeue: requeue},
+ &basicRecoverOk{},
+ )
+}
+
+/*
+Ack acknowledges a delivery by its delivery tag when having been consumed with
+Channel.Consume or Channel.Get.
+
+Ack acknowledges all messages received prior to the delivery tag when multiple
+is true.
+
+See also Delivery.Ack
+*/
+func (me *Channel) Ack(tag uint64, multiple bool) error {
+ return me.send(me, &basicAck{
+ DeliveryTag: tag,
+ Multiple: multiple,
+ })
+}
+
+/*
+Nack negatively acknowledges a delivery by its delivery tag. Prefer this
+method to notify the server that you were not able to process this delivery and
+it must be redelivered or dropped.
+
+See also Delivery.Nack
+*/
+func (me *Channel) Nack(tag uint64, multiple bool, requeue bool) error {
+ return me.send(me, &basicNack{
+ DeliveryTag: tag,
+ Multiple: multiple,
+ Requeue: requeue,
+ })
+}
+
+/*
+Reject negatively acknowledges a delivery by its delivery tag. Prefer Nack
+over Reject when communicating with a RabbitMQ server because you can Nack
+multiple messages, reducing the amount of protocol messages to exchange.
+
+See also Delivery.Reject
+*/
+func (me *Channel) Reject(tag uint64, requeue bool) error {
+ return me.send(me, &basicReject{
+ DeliveryTag: tag,
+ Requeue: requeue,
+ })
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/client_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/client_test.go
new file mode 100644
index 000000000..23acc9744
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/client_test.go
@@ -0,0 +1,603 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bytes" + "io" + "reflect" + "testing" + "time" +) + +type server struct { + *testing.T + r reader // framer <- client + w writer // framer -> client + S io.ReadWriteCloser // Server IO + C io.ReadWriteCloser // Client IO + + // captured client frames + start connectionStartOk + tune connectionTuneOk +} + +func defaultConfig() Config { + return Config{SASL: []Authentication{&PlainAuth{"guest", "guest"}}, Vhost: "/"} +} + +func newSession(t *testing.T) (io.ReadWriteCloser, *server) { + rs, wc := io.Pipe() + rc, ws := io.Pipe() + + rws := &logIO{t, "server", pipe{rs, ws}} + rwc := &logIO{t, "client", pipe{rc, wc}} + + server := server{ + T: t, + r: reader{rws}, + w: writer{rws}, + S: rws, + C: rwc, + } + + return rwc, &server +} + +func (t *server) expectBytes(b []byte) { + in := make([]byte, len(b)) + if _, err := io.ReadFull(t.S, in); err != nil { + t.Fatalf("io error expecting bytes: %v", err) + } + + if bytes.Compare(b, in) != 0 { + t.Fatalf("failed bytes: expected: %s got: %s", string(b), string(in)) + } +} + +func (t *server) send(channel int, m message) { + defer time.AfterFunc(time.Second, func() { panic("send deadlock") }).Stop() + + if err := t.w.WriteFrame(&methodFrame{ + ChannelId: uint16(channel), + Method: m, + }); err != nil { + t.Fatalf("frame err, write: %s", err) + } +} + +// drops all but method frames expected on the given channel +func (t *server) recv(channel int, m message) message { + defer time.AfterFunc(time.Second, func() { panic("recv deadlock") }).Stop() + + var remaining int + var header *headerFrame + var body []byte + + for { + frame, err := t.r.ReadFrame() + if err != nil { + t.Fatalf("frame err, read: %s", err) + } + + if frame.channel() != uint16(channel) { + t.Fatalf("expected frame on channel %d, got channel %d", channel, frame.channel()) + } + + switch f := frame.(type) { + case *heartbeatFrame: + // drop + + case *headerFrame: + // start content state + header = f + remaining = int(header.Size) + if remaining == 0 { + m.(messageWithContent).setContent(header.Properties, nil) + return m + } + + case *bodyFrame: + // continue until terminated + body = append(body, f.Body...) 
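+ // keep accumulating chunked body frames until the full
+ // header-declared size has arrived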
+ remaining -= len(f.Body) + if remaining <= 0 { + m.(messageWithContent).setContent(header.Properties, body) + return m + } + + case *methodFrame: + if reflect.TypeOf(m) == reflect.TypeOf(f.Method) { + wantv := reflect.ValueOf(m).Elem() + havev := reflect.ValueOf(f.Method).Elem() + wantv.Set(havev) + if _, ok := m.(messageWithContent); !ok { + return m + } + } else { + t.Fatalf("expected method type: %T, got: %T", m, f.Method) + } + + default: + t.Fatalf("unexpected frame: %+v", f) + } + } + + panic("unreachable") +} + +func (t *server) expectAMQP() { + t.expectBytes([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) +} + +func (t *server) connectionStart() { + t.send(0, &connectionStart{ + VersionMajor: 0, + VersionMinor: 9, + Mechanisms: "PLAIN", + Locales: "en-us", + }) + + t.recv(0, &t.start) +} + +func (t *server) connectionTune() { + t.send(0, &connectionTune{ + ChannelMax: 11, + FrameMax: 20000, + Heartbeat: 10, + }) + + t.recv(0, &t.tune) +} + +func (t *server) connectionOpen() { + t.expectAMQP() + t.connectionStart() + t.connectionTune() + + t.recv(0, &connectionOpen{}) + t.send(0, &connectionOpenOk{}) +} + +func (t *server) connectionClose() { + t.recv(0, &connectionClose{}) + t.send(0, &connectionCloseOk{}) +} + +func (t *server) channelOpen(id int) { + t.recv(id, &channelOpen{}) + t.send(id, &channelOpenOk{}) +} + +func TestDefaultClientProperties(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, defaultConfig()); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + if want, got := defaultProduct, srv.start.ClientProperties["product"]; want != got { + t.Errorf("expected product %s got: %s", want, got) + } + + if want, got := defaultVersion, srv.start.ClientProperties["version"]; want != got { + t.Errorf("expected version %s got: %s", want, got) + } +} + +func TestCustomClientProperties(t *testing.T) { + rwc, srv := newSession(t) + + config := defaultConfig() + config.Properties = Table{ + "product": "foo", + "version": "1.0", + } + + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, config); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + if want, got := config.Properties["product"], srv.start.ClientProperties["product"]; want != got { + t.Errorf("expected product %s got: %s", want, got) + } + + if want, got := config.Properties["version"], srv.start.ClientProperties["version"]; want != got { + t.Errorf("expected version %s got: %s", want, got) + } +} + +func TestOpen(t *testing.T) { + rwc, srv := newSession(t) + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, defaultConfig()); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } +} + +func TestChannelOpen(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } +} + +func TestOpenFailedSASLUnsupportedMechanisms(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.expectAMQP() + srv.send(0, &connectionStart{ + VersionMajor: 0, + VersionMinor: 9, + Mechanisms: "KERBEROS NTLM", + Locales: "en-us", + }) + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrSASL { + t.Fatalf("expected ErrSASL got: %+v on 
%+v", err, c) + } +} + +func TestOpenFailedCredentials(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.expectAMQP() + srv.connectionStart() + // Now kill/timeout the connection indicating bad auth + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrCredentials { + t.Fatalf("expected ErrCredentials got: %+v on %+v", err, c) + } +} + +func TestOpenFailedVhost(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.expectAMQP() + srv.connectionStart() + srv.connectionTune() + srv.recv(0, &connectionOpen{}) + + // Now kill/timeout the connection on bad Vhost + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrVhost { + t.Fatalf("expected ErrVhost got: %+v on %+v", err, c) + } +} + +func TestConfirmMultipleOrdersDeliveryTags(t *testing.T) { + rwc, srv := newSession(t) + defer rwc.Close() + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(1, &confirmSelect{}) + srv.send(1, &confirmSelectOk{}) + + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + + // Single tag, plus multiple, should produce + // 2, 1, 3, 4 + srv.send(1, &basicAck{DeliveryTag: 2}) + srv.send(1, &basicAck{DeliveryTag: 1}) + srv.send(1, &basicAck{DeliveryTag: 4, Multiple: true}) + + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + + // And some more, but in reverse order, multiple then one + // 5, 6, 7, 8 + srv.send(1, &basicAck{DeliveryTag: 6, Multiple: true}) + srv.send(1, &basicAck{DeliveryTag: 8}) + srv.send(1, &basicAck{DeliveryTag: 7}) + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + confirm := ch.NotifyPublish(make(chan Confirmation)) + + ch.Confirm(false) + + go func() { + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 1")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 2")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 3")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 4")}) + }() + + // received out of order, consumed in order + for i, tag := range []uint64{1, 2, 3, 4} { + if ack := <-confirm; tag != ack.DeliveryTag { + t.Fatalf("failed ack, expected ack#%d to be %d, got %d", i, tag, ack.DeliveryTag) + } + } + + go func() { + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 5")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 6")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 7")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 8")}) + }() + + for i, tag := range []uint64{5, 6, 7, 8} { + if ack := <-confirm; tag != ack.DeliveryTag { + t.Fatalf("failed ack, expected ack#%d to be %d, got %d", i, tag, ack.DeliveryTag) + } + } + +} + +func TestNotifyClosesReusedPublisherConfirmChan(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(1, &confirmSelect{}) + srv.send(1, &confirmSelectOk{}) + + srv.recv(0, &connectionClose{}) + srv.send(0, &connectionCloseOk{}) + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + 
ackAndNack := make(chan uint64)
+ ch.NotifyConfirm(ackAndNack, ackAndNack)
+
+ if err := ch.Confirm(false); err != nil {
+ t.Fatalf("expected to enter confirm mode: %v", err)
+ }
+
+ if err := c.Close(); err != nil {
+ t.Fatalf("could not close connection: %v (%s)", c, err)
+ }
+}
+
+func TestNotifyClosesAllChansAfterConnectionClose(t *testing.T) {
+ rwc, srv := newSession(t)
+
+ go func() {
+ srv.connectionOpen()
+ srv.channelOpen(1)
+
+ srv.recv(0, &connectionClose{})
+ srv.send(0, &connectionCloseOk{})
+ }()
+
+ c, err := Open(rwc, defaultConfig())
+ if err != nil {
+ t.Fatalf("could not create connection: %v (%s)", c, err)
+ }
+
+ ch, err := c.Channel()
+ if err != nil {
+ t.Fatalf("could not open channel: %v (%s)", ch, err)
+ }
+
+ if err := c.Close(); err != nil {
+ t.Fatalf("could not close connection: %v (%s)", c, err)
+ }
+
+ select {
+ case <-c.NotifyClose(make(chan *Error)):
+ case <-time.After(time.Millisecond):
+ t.Errorf("expected to close NotifyClose chan after Connection.Close")
+ }
+
+ select {
+ case <-ch.NotifyClose(make(chan *Error)):
+ case <-time.After(time.Millisecond):
+ t.Errorf("expected to close Connection.NotifyClose chan after Connection.Close")
+ }
+
+ select {
+ case <-ch.NotifyFlow(make(chan bool)):
+ case <-time.After(time.Millisecond):
+ t.Errorf("expected to close Channel.NotifyFlow chan after Connection.Close")
+ }
+
+ select {
+ case <-ch.NotifyCancel(make(chan string)):
+ case <-time.After(time.Millisecond):
+ t.Errorf("expected to close Channel.NotifyCancel chan after Connection.Close")
+ }
+
+ select {
+ case <-ch.NotifyReturn(make(chan Return)):
+ case <-time.After(time.Millisecond):
+ t.Errorf("expected to close Channel.NotifyReturn chan after Connection.Close")
+ }
+
+ confirms := ch.NotifyPublish(make(chan Confirmation))
+
+ select {
+ case <-confirms:
+ case <-time.After(time.Millisecond):
+ t.Errorf("expected to close confirms on Channel.NotifyPublish chan after Connection.Close")
+ }
+}
+
+// Should not panic when sending bodies split at different boundaries
+func TestPublishBodySliceIssue74(t *testing.T) {
+ rwc, srv := newSession(t)
+ defer rwc.Close()
+
+ const frameSize = 100
+ const publishings = frameSize * 3
+
+ done := make(chan bool)
+ base := make([]byte, publishings)
+
+ go func() {
+ srv.connectionOpen()
+ srv.channelOpen(1)
+
+ for i := 0; i < publishings; i++ {
+ srv.recv(1, &basicPublish{})
+ }
+
+ done <- true
+ }()
+
+ cfg := defaultConfig()
+ cfg.FrameSize = frameSize
+
+ c, err := Open(rwc, cfg)
+ if err != nil {
+ t.Fatalf("could not create connection: %v (%s)", c, err)
+ }
+
+ ch, err := c.Channel()
+ if err != nil {
+ t.Fatalf("could not open channel: %v (%s)", ch, err)
+ }
+
+ for i := 0; i < publishings; i++ {
+ go ch.Publish("", "q", false, false, Publishing{Body: base[0:i]})
+ }
+
+ <-done
+}
+
+// Should not panic when server and client have frame_size of 0
+func TestPublishZeroFrameSizeIssue161(t *testing.T) {
+ rwc, srv := newSession(t)
+ defer rwc.Close()
+
+ const frameSize = 0
+ const publishings = 1
+ done := make(chan bool)
+
+ go func() {
+ srv.connectionOpen()
+ srv.channelOpen(1)
+
+ for i := 0; i < publishings; i++ {
+ srv.recv(1, &basicPublish{})
+ }
+
+ done <- true
+ }()
+
+ cfg := defaultConfig()
+ cfg.FrameSize = frameSize
+
+ c, err := Open(rwc, cfg)
+ if err != nil {
+ t.Fatalf("could not create connection: %v (%s)", c, err)
+ }
+
+ // override the tuned framesize with a hard 0, as would happen when rabbit is configured with 0
+ c.Config.FrameSize = frameSize
+
+ ch, err := c.Channel()
+ if err != 
nil {
+ t.Fatalf("could not open channel: %v (%s)", ch, err)
+ }
+
+ for i := 0; i < publishings; i++ {
+ go ch.Publish("", "q", false, false, Publishing{Body: []byte("anything")})
+ }
+
+ <-done
+}
+
+func TestPublishAndShutdownDeadlockIssue84(t *testing.T) {
+ rwc, srv := newSession(t)
+ defer rwc.Close()
+
+ go func() {
+ srv.connectionOpen()
+ srv.channelOpen(1)
+ srv.recv(1, &basicPublish{})
+ // Mimic a broken io pipe so that Publish catches the error and goes into shutdown
+ srv.S.Close()
+ }()
+
+ c, err := Open(rwc, defaultConfig())
+ if err != nil {
+ t.Fatalf("couldn't create connection: %v (%s)", c, err)
+ }
+
+ ch, err := c.Channel()
+ if err != nil {
+ t.Fatalf("couldn't open channel: %v (%s)", ch, err)
+ }
+
+ defer time.AfterFunc(500*time.Millisecond, func() { panic("Publish deadlock") }).Stop()
+ for {
+ if err := ch.Publish("exchange", "q", false, false, Publishing{Body: []byte("test")}); err != nil {
+ t.Log("successfully caught disconnect error", err)
+ return
+ }
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/confirms.go b/services/templeton/vendor/src/github.com/streadway/amqp/confirms.go
new file mode 100644
index 000000000..ebee9368b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/confirms.go
@@ -0,0 +1,93 @@
+package amqp
+
+import "sync"
+
+// confirms resequences and notifies one or multiple publisher confirmation listeners
+type confirms struct {
+ m sync.Mutex
+ listeners []chan Confirmation
+ sequencer map[uint64]Confirmation
+ published uint64
+ expecting uint64
+}
+
+// newConfirms allocates a confirms
+func newConfirms() *confirms {
+ return &confirms{
+ sequencer: map[uint64]Confirmation{},
+ published: 0,
+ expecting: 1,
+ }
+}
+
+func (c *confirms) Listen(l chan Confirmation) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.listeners = append(c.listeners, l)
+}
+
+// Publish increments the publishing counter and returns the current count
+func (c *confirms) Publish() uint64 {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.published++
+ return c.published
+}
+
+// confirm confirms one publishing, increments the expecting delivery tag, and
+// removes bookkeeping for that delivery tag.
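+// It must be called with c.m held; each send blocks until that listener
+// receives the confirmation.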
+func (c *confirms) confirm(confirmation Confirmation) {
+	delete(c.sequencer, c.expecting)
+	c.expecting++
+	for _, l := range c.listeners {
+		l <- confirmation
+	}
+}
+
+// resequence confirms any out of order delivered confirmations
+func (c *confirms) resequence() {
+	for c.expecting <= c.published {
+		sequenced, found := c.sequencer[c.expecting]
+		if !found {
+			return
+		}
+		c.confirm(sequenced)
+	}
+}
+
+// One confirms one publishing and all following in the publishing sequence
+func (c *confirms) One(confirmed Confirmation) {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	if c.expecting == confirmed.DeliveryTag {
+		c.confirm(confirmed)
+	} else {
+		c.sequencer[confirmed.DeliveryTag] = confirmed
+	}
+	c.resequence()
+}
+
+// Multiple confirms all publishings up until the delivery tag
+func (c *confirms) Multiple(confirmed Confirmation) {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	for c.expecting <= confirmed.DeliveryTag {
+		c.confirm(Confirmation{c.expecting, confirmed.Ack})
+	}
+}
+
+// Close closes all listeners, discarding any out of sequence confirmations
+func (c *confirms) Close() error {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	for _, l := range c.listeners {
+		close(l)
+	}
+	c.listeners = nil
+	return nil
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/confirms_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/confirms_test.go
new file mode 100644
index 000000000..7eb2acc06
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/confirms_test.go
@@ -0,0 +1,119 @@
+package amqp
+
+import (
+	"testing"
+	"time"
+)
+
+func TestConfirmOneResequences(t *testing.T) {
+	var (
+		fixtures = []Confirmation{
+			{1, true},
+			{2, false},
+			{3, true},
+		}
+		c = newConfirms()
+		l = make(chan Confirmation, len(fixtures))
+	)
+
+	c.Listen(l)
+
+	for i := range fixtures {
+		if want, got := uint64(i+1), c.Publish(); want != got {
+			t.Fatalf("expected publish to return the 1 based delivery tag published, want: %d, got: %d", want, got)
+		}
+	}
+
+	c.One(fixtures[1])
+	c.One(fixtures[2])
+
+	select {
+	case confirm := <-l:
+		t.Fatalf("expected to wait in order to properly resequence results, got: %+v", confirm)
+	default:
+	}
+
+	c.One(fixtures[0])
+
+	for i, fix := range fixtures {
+		if want, got := fix, <-l; want != got {
+			t.Fatalf("expected to return confirmations in sequence for %d, want: %+v, got: %+v", i, want, got)
+		}
+	}
+}
+
+func TestConfirmMultipleResequences(t *testing.T) {
+	var (
+		fixtures = []Confirmation{
+			{1, true},
+			{2, true},
+			{3, true},
+			{4, true},
+		}
+		c = newConfirms()
+		l = make(chan Confirmation, len(fixtures))
+	)
+	c.Listen(l)
+
+	for range fixtures {
+		c.Publish()
+	}
+
+	c.Multiple(fixtures[len(fixtures)-1])
+
+	for i, fix := range fixtures {
+		if want, got := fix, <-l; want != got {
+			t.Fatalf("expected to confirm multiple in sequence for %d, want: %+v, got: %+v", i, want, got)
+		}
+	}
+}
+
+func BenchmarkSequentialBufferedConfirms(t *testing.B) {
+	var (
+		c = newConfirms()
+		l = make(chan Confirmation, 10)
+	)
+
+	c.Listen(l)
+
+	for i := 0; i < t.N; i++ {
+		if i > cap(l)-1 {
+			<-l
+		}
+		c.One(Confirmation{c.Publish(), true})
+	}
+}
+
+func TestConfirmsIsThreadSafe(t *testing.T) {
+	const count = 1000
+	const timeout = 5 * time.Second
+	var (
+		c    = newConfirms()
+		l    = make(chan Confirmation)
+		pub  = make(chan Confirmation)
+		done = make(chan Confirmation)
+		late = time.After(timeout)
+	)
+
+	c.Listen(l)
+
+	for i := 0; i < count; i++ {
+		go func() { pub <- Confirmation{c.Publish(), true} }()
+	}
+
+	for i := 0; i < count; i++ {
+		go func() { c.One(<-pub) }()
+	}
+
+	for i := 0; i < count; i++ {
+		go func() { done <- <-l }()
+	}
+
+	for i := 0; i < count; i++ {
+		select {
+		case <-done:
+		case <-late:
+			t.Fatalf("expected all publish/confirms to finish after %s", timeout)
+		}
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/connection.go b/services/templeton/vendor/src/github.com/streadway/amqp/connection.go
new file mode 100644
index 000000000..ad4007978
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/connection.go
@@ -0,0 +1,769 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+package amqp
+
+import (
+	"bufio"
+	"crypto/tls"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	maxChannelMax = (2 << 15) - 1
+
+	defaultHeartbeat         = 10 * time.Second
+	defaultConnectionTimeout = 30 * time.Second
+	defaultProduct           = "https://github.com/streadway/amqp"
+	defaultVersion           = "β"
+	defaultChannelMax        = maxChannelMax
+)
+
+// Config is used in DialConfig and Open to specify the desired tuning
+// parameters used during a connection open handshake. The negotiated tuning
+// will be stored in the returned connection's Config field.
+type Config struct {
+	// The SASL mechanisms to try in the client request, and the successful
+	// mechanism used on the Connection object.
+	// If SASL is nil, PlainAuth from the URL is used.
+	SASL []Authentication
+
+	// Vhost specifies the namespace of permissions, exchanges, queues and
+	// bindings on the server. Dial sets this to the path parsed from the URL.
+	Vhost string
+
+	ChannelMax int           // 0 max channels means 2^16 - 1
+	FrameSize  int           // 0 max bytes means unlimited
+	Heartbeat  time.Duration // less than 1s uses the server's interval
+
+	// TLSClientConfig specifies the client configuration of the TLS connection
+	// when establishing a tls transport.
+	// If the URL uses an amqps scheme, then an empty tls.Config with the
+	// ServerName from the URL is used.
+	TLSClientConfig *tls.Config
+
+	// Properties is a table of properties that the client advertises to the server.
+	// This is an optional setting - if the application does not set this,
+	// the underlying library will use a generic set of client properties.
+	Properties Table
+
+	// Dial returns a net.Conn prepared for a TLS handshake with TLSClientConfig,
+	// then an AMQP connection handshake.
+	// If Dial is nil, net.DialTimeout with a 30s connection and 30s read
+	// deadline is used.
+	Dial func(network, addr string) (net.Conn, error)
+}
+
+// Connection manages the serialization and deserialization of frames from IO
+// and dispatches the frames to the appropriate channel. All RPC methods and
+// asynchronous Publishing, Delivery, Ack, Nack and Return messages are
+// multiplexed on this channel. There must always be active receivers for
+// every asynchronous message on this connection.
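+//
+// A minimal sketch of that requirement, assuming conn is an open *Connection
+// (the log call is illustrative only): every chan registered through a
+// Notify* method should be drained until it is closed, for example:
+//
+//	closes := conn.NotifyClose(make(chan *Error, 1))
+//	go func() {
+//		for err := range closes {
+//			log.Println("connection closed:", err)
+//		}
+//	}()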
+type Connection struct {
+	destructor sync.Once  // shutdown once
+	sendM      sync.Mutex // conn writer mutex
+	m          sync.Mutex // struct field mutex
+
+	conn io.ReadWriteCloser
+
+	rpc       chan message
+	writer    *writer
+	sends     chan time.Time     // timestamps of each frame sent
+	deadlines chan readDeadliner // heartbeater updates read deadlines
+
+	allocator *allocator // id generator valid after openTune
+	channels  map[uint16]*Channel
+
+	noNotify bool // true when we will never notify again
+	closes   []chan *Error
+	blocks   []chan Blocking
+
+	errors chan *Error
+
+	Config Config // The negotiated Config after connection.open
+
+	Major      int   // Server's major version
+	Minor      int   // Server's minor version
+	Properties Table // Server properties
+}
+
+type readDeadliner interface {
+	SetReadDeadline(time.Time) error
+}
+
+type localNetAddr interface {
+	LocalAddr() net.Addr
+}
+
+// defaultDial establishes a connection when config.Dial is not provided
+func defaultDial(network, addr string) (net.Conn, error) {
+	conn, err := net.DialTimeout(network, addr, defaultConnectionTimeout)
+	if err != nil {
+		return nil, err
+	}
+
+	// Heartbeating hasn't started yet, don't stall forever on a dead server.
+	if err := conn.SetReadDeadline(time.Now().Add(defaultConnectionTimeout)); err != nil {
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// Dial accepts a string in the AMQP URI format and returns a new Connection
+// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
+// seconds and sets the initial read deadline to 30 seconds.
+//
+// Dial uses the zero value of tls.Config when it encounters an amqps://
+// scheme. It is equivalent to calling DialTLS(url, nil).
+func Dial(url string) (*Connection, error) {
+	return DialConfig(url, Config{
+		Heartbeat: defaultHeartbeat,
+	})
+}
+
+// DialTLS accepts a string in the AMQP URI format and returns a new Connection
+// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
+// seconds and sets the initial read deadline to 30 seconds.
+//
+// DialTLS uses the provided tls.Config when encountering an amqps:// scheme.
+func DialTLS(url string, amqps *tls.Config) (*Connection, error) {
+	return DialConfig(url, Config{
+		Heartbeat:       defaultHeartbeat,
+		TLSClientConfig: amqps,
+	})
+}
+
+// DialConfig accepts a string in the AMQP URI format and a configuration for
+// the transport and connection setup, returning a new Connection. Defaults to
+// a server heartbeat interval of 10 seconds and sets the initial read deadline
+// to 30 seconds.
+func DialConfig(url string, config Config) (*Connection, error) {
+	var err error
+	var conn net.Conn
+
+	uri, err := ParseURI(url)
+	if err != nil {
+		return nil, err
+	}
+
+	if config.SASL == nil {
+		config.SASL = []Authentication{uri.PlainAuth()}
+	}
+
+	if config.Vhost == "" {
+		config.Vhost = uri.Vhost
+	}
+
+	if uri.Scheme == "amqps" && config.TLSClientConfig == nil {
+		config.TLSClientConfig = new(tls.Config)
+	}
+
+	addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10))
+
+	dialer := config.Dial
+	if dialer == nil {
+		dialer = defaultDial
+	}
+
+	conn, err = dialer("tcp", addr)
+	if err != nil {
+		return nil, err
+	}
+
+	if config.TLSClientConfig != nil {
+		// Use the URI's host for hostname validation unless otherwise set. Make a
+		// copy so as not to modify the caller's reference when the caller reuses a
+		// tls.Config for a different URL.
+		if config.TLSClientConfig.ServerName == "" {
+			c := *config.TLSClientConfig
+			c.ServerName = uri.Host
+			config.TLSClientConfig = &c
+		}
+
+		client := tls.Client(conn, config.TLSClientConfig)
+		if err := client.Handshake(); err != nil {
+			conn.Close()
+			return nil, err
+		}
+
+		conn = client
+	}
+
+	return Open(conn, config)
+}
+
+/*
+Open accepts an already established connection, or other io.ReadWriteCloser as
+a transport. Use this method if you have established a TLS connection or wish
+to use your own custom transport.
+
+*/
+func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) {
+	me := &Connection{
+		conn:      conn,
+		writer:    &writer{bufio.NewWriter(conn)},
+		channels:  make(map[uint16]*Channel),
+		rpc:       make(chan message),
+		sends:     make(chan time.Time),
+		errors:    make(chan *Error, 1),
+		deadlines: make(chan readDeadliner, 1),
+	}
+	go me.reader(conn)
+	return me, me.open(config)
+}
+
+/*
+LocalAddr returns the local TCP peer address, or ":0" (the zero value of net.TCPAddr)
+as a fallback default value if the underlying transport does not support LocalAddr().
+*/
+func (me *Connection) LocalAddr() net.Addr {
+	if c, ok := me.conn.(localNetAddr); ok {
+		return c.LocalAddr()
+	}
+	return &net.TCPAddr{}
+}
+
+/*
+NotifyClose registers a listener for close events either initiated by an error
+accompanying a connection.close method or by a normal shutdown.
+
+On normal shutdowns, the chan will be closed.
+
+To reconnect after a transport or protocol error, register a listener here and
+re-run your setup process.
+
+*/
+func (me *Connection) NotifyClose(c chan *Error) chan *Error {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	if me.noNotify {
+		close(c)
+	} else {
+		me.closes = append(me.closes, c)
+	}
+
+	return c
+}
+
+/*
+NotifyBlocked registers a listener for RabbitMQ specific TCP flow control method
+extensions connection.blocked and connection.unblocked. Flow control is active
+with a reason when Blocking.Blocked is true. When a Connection is blocked, all
+methods will block across all connections until server resources become free
+again.

+This optional extension is supported by the server when the
+"connection.blocked" server capability key is true.
+
+*/
+func (me *Connection) NotifyBlocked(c chan Blocking) chan Blocking {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	if me.noNotify {
+		close(c)
+	} else {
+		me.blocks = append(me.blocks, c)
+	}
+
+	return c
+}
+
+/*
+Close requests and waits for the response to close the AMQP connection.
+
+It's advisable to use this method when publishing to ensure all kernel buffers
+have been flushed on the server and client before exiting.
+
+An error indicates that the server may not have received this request to close
+but the connection should be treated as closed regardless.
+
+After returning from this call, all resources associated with this connection,
+including the underlying io, Channels, Notify listeners and Channel consumers
+will also be closed.
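+
+A minimal usage sketch, assuming conn was returned by Dial:
+
+	if err := conn.Close(); err != nil {
+		log.Println("AMQP connection closed with error:", err)
+	}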
+*/
+func (me *Connection) Close() error {
+	defer me.shutdown(nil)
+	return me.call(
+		&connectionClose{
+			ReplyCode: replySuccess,
+			ReplyText: "kthxbai",
+		},
+		&connectionCloseOk{},
+	)
+}
+
+func (me *Connection) closeWith(err *Error) error {
+	defer me.shutdown(err)
+	return me.call(
+		&connectionClose{
+			ReplyCode: uint16(err.Code),
+			ReplyText: err.Reason,
+		},
+		&connectionCloseOk{},
+	)
+}
+
+func (me *Connection) send(f frame) error {
+	me.sendM.Lock()
+	err := me.writer.WriteFrame(f)
+	me.sendM.Unlock()
+
+	if err != nil {
+		// shutdown could be re-entrant from signaling notify chans
+		go me.shutdown(&Error{
+			Code:   FrameError,
+			Reason: err.Error(),
+		})
+	} else {
+		// Broadcast we sent a frame, reducing heartbeats, only
+		// if there is something that can receive - like a non-reentrant
+		// call or if the heartbeater isn't running
+		select {
+		case me.sends <- time.Now():
+		default:
+		}
+	}
+
+	return err
+}
+
+func (me *Connection) shutdown(err *Error) {
+	me.destructor.Do(func() {
+		if err != nil {
+			for _, c := range me.closes {
+				c <- err
+			}
+		}
+
+		for _, ch := range me.channels {
+			me.closeChannel(ch, err)
+		}
+
+		if err != nil {
+			me.errors <- err
+		}
+
+		me.conn.Close()
+
+		for _, c := range me.closes {
+			close(c)
+		}
+
+		for _, c := range me.blocks {
+			close(c)
+		}
+
+		me.m.Lock()
+		me.noNotify = true
+		me.m.Unlock()
+	})
+}
+
+// All methods sent to the connection channel should be synchronous so we
+// can handle them directly without a framing component
+func (me *Connection) demux(f frame) {
+	if f.channel() == 0 {
+		me.dispatch0(f)
+	} else {
+		me.dispatchN(f)
+	}
+}
+
+func (me *Connection) dispatch0(f frame) {
+	switch mf := f.(type) {
+	case *methodFrame:
+		switch m := mf.Method.(type) {
+		case *connectionClose:
+			// Send immediately as shutdown will close our side of the writer.
+			me.send(&methodFrame{
+				ChannelId: 0,
+				Method:    &connectionCloseOk{},
+			})
+
+			me.shutdown(newError(m.ReplyCode, m.ReplyText))
+		case *connectionBlocked:
+			for _, c := range me.blocks {
+				c <- Blocking{Active: true, Reason: m.Reason}
+			}
+		case *connectionUnblocked:
+			for _, c := range me.blocks {
+				c <- Blocking{Active: false}
+			}
+		default:
+			me.rpc <- m
+		}
+	case *heartbeatFrame:
+		// kthx - all reads reset our deadline. so we can drop this
+	default:
+		// lolwat - channel0 only responds to methods and heartbeats
+		me.closeWith(ErrUnexpectedFrame)
+	}
+}
+
+func (me *Connection) dispatchN(f frame) {
+	me.m.Lock()
+	channel := me.channels[f.channel()]
+	me.m.Unlock()
+
+	if channel != nil {
+		channel.recv(channel, f)
+	} else {
+		me.dispatchClosed(f)
+	}
+}
+
+// section 2.3.7: "When a peer decides to close a channel or connection, it
+// sends a Close method. The receiving peer MUST respond to a Close with a
+// Close-Ok, and then both parties can close their channel or connection. Note
+// that if peers ignore Close, deadlock can happen when both peers send Close
+// at the same time."
+//
+// When we don't have a channel, we must respond with close-ok on a close
+// method. This can happen between a channel exception on an asynchronous
+// method like basic.publish and a synchronous close with channel.close.
+// In that case, we'll get both a channel.close and channel.close-ok in any
+// order.
+func (me *Connection) dispatchClosed(f frame) {
+	// Only consider method frames, drop content/header frames
+	if mf, ok := f.(*methodFrame); ok {
+		switch mf.Method.(type) {
+		case *channelClose:
+			me.send(&methodFrame{
+				ChannelId: f.channel(),
+				Method:    &channelCloseOk{},
+			})
+		case *channelCloseOk:
+			// we are already closed, so do nothing
+		default:
+			// unexpected method on closed channel
+			me.closeWith(ErrClosed)
+		}
+	}
+}
+
+// Reads each frame off the IO and hands it off to the connection object that
+// will demux the streams and dispatch to one of the opened channels or
+// handle it on channel 0 (the connection channel).
+func (me *Connection) reader(r io.Reader) {
+	buf := bufio.NewReader(r)
+	frames := &reader{buf}
+	conn, haveDeadliner := r.(readDeadliner)
+
+	for {
+		frame, err := frames.ReadFrame()
+
+		if err != nil {
+			me.shutdown(&Error{Code: FrameError, Reason: err.Error()})
+			return
+		}
+
+		me.demux(frame)
+
+		if haveDeadliner {
+			me.deadlines <- conn
+		}
+	}
+}
+
+// Ensures that at least one frame is being sent at the tuned interval with a
+// jitter tolerance of 1s
+func (me *Connection) heartbeater(interval time.Duration, done chan *Error) {
+	const maxServerHeartbeatsInFlight = 3
+
+	var sendTicks <-chan time.Time
+	if interval > 0 {
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+		sendTicks = ticker.C
+	}
+
+	lastSent := time.Now()
+
+	for {
+		select {
+		case at, stillSending := <-me.sends:
+			// When actively sending, depend on sent frames to reset server timer
+			if stillSending {
+				lastSent = at
+			} else {
+				return
+			}
+
+		case at := <-sendTicks:
+			// When idle, fill the space with a heartbeat frame
+			if at.Sub(lastSent) > interval-time.Second {
+				if err := me.send(&heartbeatFrame{}); err != nil {
+					// send heartbeats even after close/closeOk so we
+					// tick until the connection starts erroring
+					return
+				}
+			}
+
+		case conn := <-me.deadlines:
+			// When reading, reset our side of the deadline, if we've negotiated one with
+			// a deadline that covers at least 2 server heartbeats
+			if interval > 0 {
+				conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval))
+			}
+
+		case <-done:
+			return
+		}
+	}
+}
+
+// Convenience method to inspect the Connection.Properties["capabilities"]
+// Table for server identified capabilities like "basic.ack" or
+// "confirm.select".
+func (me *Connection) isCapable(featureName string) bool {
+	capabilities, _ := me.Properties["capabilities"].(Table)
+	hasFeature, _ := capabilities[featureName].(bool)
+	return hasFeature
+}
+
+// allocateChannel records but does not open a new channel with a unique id.
+// This method is the initial part of the channel lifecycle and paired with
+// releaseChannel
+func (me *Connection) allocateChannel() (*Channel, error) {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	id, ok := me.allocator.next()
+	if !ok {
+		return nil, ErrChannelMax
+	}
+
+	ch := newChannel(me, uint16(id))
+	me.channels[uint16(id)] = ch
+
+	return ch, nil
+}
+
+// releaseChannel removes a channel from the registry as the final part of the
+// channel lifecycle
+func (me *Connection) releaseChannel(id uint16) {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	delete(me.channels, id)
+	me.allocator.release(int(id))
+}
+
+// openChannel allocates and opens a channel, must be paired with closeChannel
+func (me *Connection) openChannel() (*Channel, error) {
+	ch, err := me.allocateChannel()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := ch.open(); err != nil {
+		return nil, err
+	}
+	return ch, nil
+}
+
+// closeChannel releases and initiates a shutdown of the channel. All channel
+// closures should be initiated here for proper channel lifecycle management on
+// this connection.
+func (me *Connection) closeChannel(ch *Channel, e *Error) {
+	ch.shutdown(e)
+	me.releaseChannel(ch.id)
+}
+
+/*
+Channel opens a unique, concurrent server channel to process the bulk of AMQP
+messages. Any error from methods on this receiver will render the receiver
+invalid and a new Channel should be opened.
+
+*/
+func (me *Connection) Channel() (*Channel, error) {
+	return me.openChannel()
+}
+
+func (me *Connection) call(req message, res ...message) error {
+	// Special case for when the protocol header frame is sent instead of a
+	// request method
+	if req != nil {
+		if err := me.send(&methodFrame{ChannelId: 0, Method: req}); err != nil {
+			return err
+		}
+	}
+
+	select {
+	case err := <-me.errors:
+		return err
+
+	case msg := <-me.rpc:
+		// Try to match one of the result types
+		for _, try := range res {
+			if reflect.TypeOf(msg) == reflect.TypeOf(try) {
+				// *res = *msg
+				vres := reflect.ValueOf(try).Elem()
+				vmsg := reflect.ValueOf(msg).Elem()
+				vres.Set(vmsg)
+				return nil
+			}
+		}
+		return ErrCommandInvalid
+	}
+
+	panic("unreachable")
+}
+
+// Connection = open-Connection *use-Connection close-Connection
+// open-Connection = C:protocol-header
+//                   S:START C:START-OK
+//                   *challenge
+//                   S:TUNE C:TUNE-OK
+//                   C:OPEN S:OPEN-OK
+// challenge = S:SECURE C:SECURE-OK
+// use-Connection = *channel
+// close-Connection = C:CLOSE S:CLOSE-OK
+//                  / S:CLOSE C:CLOSE-OK
+func (me *Connection) open(config Config) error {
+	if err := me.send(&protocolHeader{}); err != nil {
+		return err
+	}
+
+	return me.openStart(config)
+}
+
+func (me *Connection) openStart(config Config) error {
+	start := &connectionStart{}
+
+	if err := me.call(nil, start); err != nil {
+		return err
+	}
+
+	me.Major = int(start.VersionMajor)
+	me.Minor = int(start.VersionMinor)
+	me.Properties = Table(start.ServerProperties)
+
+	// eventually support challenge/response here by also responding to
+	// connectionSecure.
+	auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " "))
+	if !ok {
+		return ErrSASL
+	}
+
+	// Save this mechanism off as the one we chose
+	me.Config.SASL = []Authentication{auth}
+
+	return me.openTune(config, auth)
+}
+
+func (me *Connection) openTune(config Config, auth Authentication) error {
+	if len(config.Properties) == 0 {
+		config.Properties = Table{
+			"product": defaultProduct,
+			"version": defaultVersion,
+		}
+	}
+
+	config.Properties["capabilities"] = Table{
+		"connection.blocked":     true,
+		"consumer_cancel_notify": true,
+	}
+
+	ok := &connectionStartOk{
+		Mechanism:        auth.Mechanism(),
+		Response:         auth.Response(),
+		ClientProperties: config.Properties,
+	}
+	tune := &connectionTune{}
+
+	if err := me.call(ok, tune); err != nil {
+		// per spec, a connection can only be closed when it has been opened,
+		// so at this point we know it's an auth error, but the socket was
+		// closed instead. Return a meaningful error.
+		return ErrCredentials
+	}
+
+	// When the server and client both use default 0, then the max channel is
+	// only limited by uint16.
+	me.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax))
+	if me.Config.ChannelMax == 0 {
+		me.Config.ChannelMax = defaultChannelMax
+	}
+	me.Config.ChannelMax = min(me.Config.ChannelMax, maxChannelMax)
+
+	// Frame size includes headers and end byte (len(payload)+8), even if
+	// this is less than FrameMinSize, use what the server sends because the
+	// alternative is to stop the handshake here.
+	me.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax))
+
+	// Save this off for resetDeadline()
+	me.Config.Heartbeat = time.Second * time.Duration(pick(
+		int(config.Heartbeat/time.Second),
+		int(tune.Heartbeat)))
+
+	// "The client should start sending heartbeats after receiving a
+	// Connection.Tune method"
+	go me.heartbeater(me.Config.Heartbeat, me.NotifyClose(make(chan *Error, 1)))
+
+	if err := me.send(&methodFrame{
+		ChannelId: 0,
+		Method: &connectionTuneOk{
+			ChannelMax: uint16(me.Config.ChannelMax),
+			FrameMax:   uint32(me.Config.FrameSize),
+			Heartbeat:  uint16(me.Config.Heartbeat / time.Second),
+		},
+	}); err != nil {
+		return err
+	}
+
+	return me.openVhost(config)
+}
+
+func (me *Connection) openVhost(config Config) error {
+	req := &connectionOpen{VirtualHost: config.Vhost}
+	res := &connectionOpenOk{}
+
+	if err := me.call(req, res); err != nil {
+		// Cannot be closed yet, but we know it's a vhost problem
+		return ErrVhost
+	}
+
+	me.Config.Vhost = config.Vhost
+
+	return me.openComplete()
+}
+
+// openComplete performs any final Connection initialization dependent on the
+// connection handshake.
+func (me *Connection) openComplete() error {
+	me.allocator = newAllocator(1, me.Config.ChannelMax)
+	return nil
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func pick(client, server int) int {
+	if client == 0 || server == 0 {
+		return max(client, server)
+	}
+	return min(client, server)
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/consumers.go b/services/templeton/vendor/src/github.com/streadway/amqp/consumers.go
new file mode 100644
index 000000000..b6bd60575
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/consumers.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+package amqp
+
+import (
+	"fmt"
+	"os"
+	"sync"
+	"sync/atomic"
+)
+
+var consumerSeq uint64
+
+func uniqueConsumerTag() string {
+	return fmt.Sprintf("ctag-%s-%d", os.Args[0], atomic.AddUint64(&consumerSeq, 1))
+}
+
+type consumerBuffers map[string]chan *Delivery
+
+// Concurrent type that manages the consumerTag ->
+// ingress consumerBuffer mapping
+type consumers struct {
+	sync.Mutex
+	chans consumerBuffers
+}
+
+func makeConsumers() *consumers {
+	return &consumers{chans: make(consumerBuffers)}
+}
+
+func bufferDeliveries(in chan *Delivery, out chan Delivery) {
+	var queue []*Delivery
+	var queueIn = in
+
+	for delivery := range in {
+		select {
+		case out <- *delivery:
+			// delivered immediately while the consumer chan can receive
+		default:
+			queue = append(queue, delivery)
+		}
+
+		for len(queue) > 0 {
+			select {
+			case out <- *queue[0]:
+				queue = queue[1:]
+			case delivery, open := <-queueIn:
+				if open {
+					queue = append(queue, delivery)
+				} else {
+					// stop receiving to drain the queue
+					queueIn = nil
+				}
+			}
+		}
+	}
+
+	close(out)
+}
+
+// On key conflict, close the previous channel.
+func (me *consumers) add(tag string, consumer chan Delivery) {
+	me.Lock()
+	defer me.Unlock()
+
+	if prev, found := me.chans[tag]; found {
+		close(prev)
+	}
+
+	in := make(chan *Delivery)
+	go bufferDeliveries(in, consumer)
+
+	me.chans[tag] = in
+}
+
+func (me *consumers) close(tag string) (found bool) {
+	me.Lock()
+	defer me.Unlock()
+
+	ch, found := me.chans[tag]
+
+	if found {
+		delete(me.chans, tag)
+		close(ch)
+	}
+
+	return found
+}
+
+func (me *consumers) closeAll() {
+	me.Lock()
+	defer me.Unlock()
+
+	for _, ch := range me.chans {
+		close(ch)
+	}
+
+	me.chans = make(consumerBuffers)
+}
+
+// Sends a delivery to the consumer identified by `tag`.
+// If unbuffered channels are used for Consume this method
+// could block all deliveries until the consumer
+// receives on the other end of the channel.
+func (me *consumers) send(tag string, msg *Delivery) bool {
+	me.Lock()
+	defer me.Unlock()
+
+	buffer, found := me.chans[tag]
+	if found {
+		buffer <- msg
+	}
+
+	return found
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/delivery.go b/services/templeton/vendor/src/github.com/streadway/amqp/delivery.go
new file mode 100644
index 000000000..f84ae4592
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/delivery.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+package amqp
+
+import (
+	"errors"
+	"time"
+)
+
+var errDeliveryNotInitialized = errors.New("delivery not initialized")
+
+// Acknowledger notifies the server of successful or failed consumption of
+// deliveries via identifier found in the Delivery.DeliveryTag field.
+//
+// Applications can provide mock implementations in tests of Delivery handlers.
+type Acknowledger interface {
+	Ack(tag uint64, multiple bool) error
+	Nack(tag uint64, multiple bool, requeue bool) error
+	Reject(tag uint64, requeue bool) error
+}
+
+// Delivery captures the fields for a previously delivered message resident in
+// a queue to be delivered by the server to a consumer from Channel.Consume or
+// Channel.Get.
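+//
+// A minimal consumption sketch, assuming deliveries came from Channel.Consume
+// with autoAck false, and process is an application-supplied handler:
+//
+//	for d := range deliveries {
+//		process(d.Body)
+//		d.Ack(false)
+//	}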
+type Delivery struct {
+	Acknowledger Acknowledger // the channel from which this delivery arrived
+
+	Headers Table // Application or header exchange table
+
+	// Properties
+	ContentType     string    // MIME content type
+	ContentEncoding string    // MIME content encoding
+	DeliveryMode    uint8     // queue implementation use - non-persistent (1) or persistent (2)
+	Priority        uint8     // queue implementation use - 0 to 9
+	CorrelationId   string    // application use - correlation identifier
+	ReplyTo         string    // application use - address to reply to (ex: RPC)
+	Expiration      string    // implementation use - message expiration spec
+	MessageId       string    // application use - message identifier
+	Timestamp       time.Time // application use - message timestamp
+	Type            string    // application use - message type name
+	UserId          string    // application use - creating user - should be authenticated user
+	AppId           string    // application use - creating application id
+
+	// Valid only with Channel.Consume
+	ConsumerTag string
+
+	// Valid only with Channel.Get
+	MessageCount uint32
+
+	DeliveryTag uint64
+	Redelivered bool
+	Exchange    string // basic.publish exchange
+	RoutingKey  string // basic.publish routing key
+
+	Body []byte
+}
+
+func newDelivery(channel *Channel, msg messageWithContent) *Delivery {
+	props, body := msg.getContent()
+
+	delivery := Delivery{
+		Acknowledger: channel,
+
+		Headers:         props.Headers,
+		ContentType:     props.ContentType,
+		ContentEncoding: props.ContentEncoding,
+		DeliveryMode:    props.DeliveryMode,
+		Priority:        props.Priority,
+		CorrelationId:   props.CorrelationId,
+		ReplyTo:         props.ReplyTo,
+		Expiration:      props.Expiration,
+		MessageId:       props.MessageId,
+		Timestamp:       props.Timestamp,
+		Type:            props.Type,
+		UserId:          props.UserId,
+		AppId:           props.AppId,
+
+		Body: body,
+	}
+
+	// Properties for the delivery types
+	switch m := msg.(type) {
+	case *basicDeliver:
+		delivery.ConsumerTag = m.ConsumerTag
+		delivery.DeliveryTag = m.DeliveryTag
+		delivery.Redelivered = m.Redelivered
+		delivery.Exchange = m.Exchange
+		delivery.RoutingKey = m.RoutingKey
+
+	case *basicGetOk:
+		delivery.MessageCount = m.MessageCount
+		delivery.DeliveryTag = m.DeliveryTag
+		delivery.Redelivered = m.Redelivered
+		delivery.Exchange = m.Exchange
+		delivery.RoutingKey = m.RoutingKey
+	}
+
+	return &delivery
+}
+
+/*
+Ack delegates an acknowledgement through the Acknowledger interface that the
+client or server has finished work on a delivery.
+
+All deliveries in AMQP must be acknowledged. If you called Channel.Consume
+with autoAck true then the server will automatically ack each message and
+this method should not be called. Otherwise, you must call Delivery.Ack after
+you have successfully processed this delivery.
+
+When multiple is true, this delivery and all prior unacknowledged deliveries
+on the same channel will be acknowledged. This is useful for batch processing
+of deliveries.
+
+An error will indicate that the acknowledge could not be delivered to the
+channel it was sent from.
+
+Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every
+delivery that is not automatically acknowledged.
+*/
+func (me Delivery) Ack(multiple bool) error {
+	if me.Acknowledger == nil {
+		return errDeliveryNotInitialized
+	}
+	return me.Acknowledger.Ack(me.DeliveryTag, multiple)
+}
+
+/*
+Reject delegates a negative acknowledgement through the Acknowledger interface.
+
+When requeue is true, queue this message to be delivered to a consumer on a
+different channel. When requeue is false or the server is unable to queue this
+message, it will be dropped.
+
+If you are batch processing deliveries, and your server supports it, prefer
+Delivery.Nack.
+
+Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every
+delivery that is not automatically acknowledged.
+*/
+func (me Delivery) Reject(requeue bool) error {
+	if me.Acknowledger == nil {
+		return errDeliveryNotInitialized
+	}
+	return me.Acknowledger.Reject(me.DeliveryTag, requeue)
+}
+
+/*
+Nack negatively acknowledges the delivery of message(s) identified by the
+delivery tag from either the client or server.
+
+When multiple is true, nack all unacknowledged deliveries up to and including
+the delivery identified by this delivery tag on the same channel.
+
+When requeue is true, request the server to deliver this message to a different
+consumer. If it is not possible or requeue is false, the message will be
+dropped or delivered to a server configured dead-letter queue.
+
+This method must not be used to select or requeue messages the client wishes
+not to handle, rather it is to inform the server that the client is incapable
+of handling this message at this time.
+
+Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every
+delivery that is not automatically acknowledged.
+*/
+func (me Delivery) Nack(multiple, requeue bool) error {
+	if me.Acknowledger == nil {
+		return errDeliveryNotInitialized
+	}
+	return me.Acknowledger.Nack(me.DeliveryTag, multiple, requeue)
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/delivery_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/delivery_test.go
new file mode 100644
index 000000000..f126f87d8
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/delivery_test.go
@@ -0,0 +1,33 @@
+package amqp
+
+import "testing"
+
+func shouldNotPanic(t *testing.T) {
+	if err := recover(); err != nil {
+		t.Fatalf("should not panic, got: %s", err)
+	}
+}
+
+// A closed delivery chan could produce zero value. Ack/Nack/Reject on these
+// deliveries can produce a nil pointer panic. Instead return an error when
+// the method can never be successful.
+func TestAckZeroValueAcknowledgerDoesNotPanic(t *testing.T) {
+	defer shouldNotPanic(t)
+	if err := (Delivery{}).Ack(false); err == nil {
+		t.Errorf("expected Delivery{}.Ack to error")
+	}
+}
+
+func TestNackZeroValueAcknowledgerDoesNotPanic(t *testing.T) {
+	defer shouldNotPanic(t)
+	if err := (Delivery{}).Nack(false, false); err == nil {
+		t.Errorf("expected Delivery{}.Nack to error")
+	}
+}
+
+func TestRejectZeroValueAcknowledgerDoesNotPanic(t *testing.T) {
+	defer shouldNotPanic(t)
+	if err := (Delivery{}).Reject(false); err == nil {
+		t.Errorf("expected Delivery{}.Reject to error")
+	}
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/doc.go b/services/templeton/vendor/src/github.com/streadway/amqp/doc.go
new file mode 100644
index 000000000..94c29f825
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/doc.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+/*
+AMQP 0.9.1 client with RabbitMQ extensions
+
+Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much
+of the terminology in this library directly relates to AMQP concepts.
+
+  Resources
+
+  http://www.rabbitmq.com/tutorials/amqp-concepts.html
+  http://www.rabbitmq.com/getstarted.html
+  http://www.rabbitmq.com/amqp-0-9-1-reference.html
+
+Design
+
+Most other broker clients publish to queues, but in AMQP, clients publish to
+Exchanges instead. AMQP is programmable, meaning that both the producers and
+consumers agree on the configuration of the broker, instead of requiring an
+operator or system configuration that declares the logical topology in the
+broker. The routing between producers and consumer queues is via Bindings.
+These bindings form the logical topology of the broker.
+
+In this library, a message sent from a publisher is called a "Publishing" and a
+message received by a consumer is called a "Delivery". The fields of
+Publishings and Deliveries are close but not exact mappings to the underlying
+wire format to maintain stronger types. Many other libraries will combine
+message properties with message headers. In this library, the message's
+well-known properties are strongly typed fields on the Publishings and
+Deliveries, whereas the user defined headers are in the Headers field.
+
+The method naming closely matches the protocol's method name with positional
+parameters mapping to named protocol message fields. The motivation here is to
+present a comprehensive view over all possible interactions with the server.
+
+Generally, the "basic." class prefix of protocol methods is elided in this
+interface, as is the "select" suffix of the various channel mode selectors,
+for example Channel.Confirm and Channel.Tx.
+
+The library is intentionally designed to be synchronous, where responses for
+each protocol message are required to be received in an RPC manner. Some
+methods have a noWait parameter like Channel.QueueDeclare, and some methods are
+asynchronous like Channel.Publish. The error values should still be checked for
+these methods as they will indicate IO failures like when the underlying
+connection closes.
+
+Asynchronous Events
+
+Clients of this library may be interested in receiving some of the protocol
+messages other than Deliveries, like basic.ack methods while a channel is in
+confirm mode.
+
+The Notify* methods with Connection and Channel receivers model the pattern of
+asynchronous events like closes due to exceptions, or messages that are sent out
+of band from an RPC call like basic.ack or basic.flow.
+
+Any asynchronous events, including Deliveries and Publishings, must always have
+a receiver until the corresponding chans are closed. Without asynchronous
+receivers, the synchronous methods will block.
+
+Use Case
+
+It's important as a client to an AMQP topology to ensure the state of the
+broker matches your expectations. For both publish and consume use cases,
+make sure you declare the queues, exchanges and bindings you expect to exist
+prior to calling Channel.Publish or Channel.Consume.
+
+  // Connections start with amqp.Dial() typically from a command line argument
+  // or environment variable.
+  connection, err := amqp.Dial(os.Getenv("AMQP_URL"))
+
+  // To cleanly shutdown by flushing kernel buffers, make sure to close and
+  // wait for the response.
+  defer connection.Close()
+
+  // Most operations happen on a channel. If any error is returned on a
+  // channel, the channel will no longer be valid, throw it away and try with
+  // a different channel.
+  channel, err := connection.Channel()
+
+  // Declare your topology here, if it doesn't exist, it will be created, if
+  // it existed already and is not what you expect, then that's considered an
+  // error.
+
+  // Use your connection on this topology with either Publish or Consume, or
+  // inspect your queues with QueueInspect. It's unwise to mix Publish and
+  // Consume to let TCP do its job well.
+
+SSL/TLS - Secure connections
+
+When Dial encounters an amqps:// scheme, it will use the zero value of a
+tls.Config. This will only perform server certificate and host verification.
+
+Use DialTLS when you wish to provide a client certificate (recommended),
+include a private certificate authority's certificate in the cert chain for
+server validity, or run insecure by not verifying the server certificate.
+DialTLS will use the provided tls.Config when it encounters an amqps:// scheme
+and will dial a plain connection when it encounters an amqp:// scheme.
+
+SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html
+
+*/
+package amqp
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/examples_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/examples_test.go
new file mode 100644
index 000000000..8be53f427
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/examples_test.go
@@ -0,0 +1,393 @@
+package amqp_test
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+	"log"
+	"net"
+	"runtime"
+	"time"
+
+	"github.com/streadway/amqp"
+)
+
+func ExampleConfig_timeout() {
+	// Provide your own anonymous Dial function that delegates to net.DialTimeout
+	// for custom timeouts
+
+	conn, err := amqp.DialConfig("amqp:///", amqp.Config{
+		Dial: func(network, addr string) (net.Conn, error) {
+			return net.DialTimeout(network, addr, 2*time.Second)
+		},
+	})
+
+	log.Printf("conn: %v, err: %v", conn, err)
+}
+
+func ExampleDialTLS() {
+	// To get started with SSL/TLS follow the instructions for adding SSL/TLS
+	// support in RabbitMQ with a private certificate authority here:
+	//
+	// http://www.rabbitmq.com/ssl.html
+	//
+	// Then in your rabbitmq.config, disable the plain AMQP port, verify clients
+	// and fail if no certificate is presented with the following:
+	//
+	//   [
+	//   {rabbit, [
+	//      {tcp_listeners, []},     % listens on 127.0.0.1:5672
+	//      {ssl_listeners, [5671]}, % listens on 0.0.0.0:5671
+	//      {ssl_options,   [{cacertfile,"/path/to/your/testca/cacert.pem"},
+	//                       {certfile,"/path/to/your/server/cert.pem"},
+	//                       {keyfile,"/path/to/your/server/key.pem"},
+	//                       {verify,verify_peer},
+	//                       {fail_if_no_peer_cert,true}]}
+	//      ]}
+	//   ].
+
+	cfg := new(tls.Config)
+
+	// The self-signing certificate authority's certificate must be included in
+	// the RootCAs to be trusted so that the server certificate can be verified.
+	//
+	// As an alternative to adding it to the tls.Config, you can add the CA's cert
+	// to your system's root CAs. The tls package will use the system roots
+	// specific to each supported OS. Under OS X, add (drag/drop) your cacert.pem
+	// file to the 'Certificates' section of KeyChain.app to add and always
+	// trust.
+	//
+	// Or with the command line add and trust the DER encoded certificate:
+	//
+	//   security add-certificate testca/cacert.cer
+	//   security add-trusted-cert testca/cacert.cer
+	//
+	// If you depend on the system root CAs, then use nil for the RootCAs field
+	// so the system roots will be loaded.
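+	//
+	// A minimal sketch of that system-roots variant (the URL is the same
+	// placeholder used below):
+	//
+	//   cfg := &tls.Config{} // nil RootCAs means the system roots are used
+	//   conn, err := amqp.DialTLS("amqps://server-name-from-certificate/", cfg)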
+
+	cfg.RootCAs = x509.NewCertPool()
+
+	if ca, err := ioutil.ReadFile("testca/cacert.pem"); err == nil {
+		cfg.RootCAs.AppendCertsFromPEM(ca)
+	}
+
+	// Move the client cert and key to a location specific to your application
+	// and load them here.
+
+	if cert, err := tls.LoadX509KeyPair("client/cert.pem", "client/key.pem"); err == nil {
+		cfg.Certificates = append(cfg.Certificates, cert)
+	}
+
+	// Server names are validated by the crypto/tls package, so the server
+	// certificate must be made for the hostname in the URL. Find the commonName
+	// (CN) and make sure the hostname in the URL matches this common name. Per
+	// the RabbitMQ instructions for a self-signed cert, this defaults to the
+	// current hostname.
+	//
+	//   openssl x509 -noout -in server/cert.pem -subject
+	//
+	// If the server name in your certificate is different from the host you are
+	// connecting to, set the hostname used for verification in the
+	// ServerName field of the tls.Config struct.
+
+	conn, err := amqp.DialTLS("amqps://server-name-from-certificate/", cfg)
+
+	log.Printf("conn: %v, err: %v", conn, err)
+}
+
+func ExampleChannel_Confirm_bridge() {
+	// This example acts as a bridge, shoveling all messages sent from the source
+	// exchange "logs" to the destination exchange "logs".
+
+	// Confirming publishes can help guard against overproduction and ensure
+	// every message is delivered.
+
+	// Setup the source of the store and forward
+	source, err := amqp.Dial("amqp://source/")
+	if err != nil {
+		log.Fatalf("connection.open source: %s", err)
+	}
+	defer source.Close()
+
+	chs, err := source.Channel()
+	if err != nil {
+		log.Fatalf("channel.open source: %s", err)
+	}
+
+	if err := chs.ExchangeDeclare("logs", "topic", true, false, false, false, nil); err != nil {
+		log.Fatalf("exchange.declare source: %s", err)
+	}
+
+	if _, err := chs.QueueDeclare("remote-tee", true, true, false, false, nil); err != nil {
+		log.Fatalf("queue.declare source: %s", err)
+	}
+
+	if err := chs.QueueBind("remote-tee", "#", "logs", false, nil); err != nil {
+		log.Fatalf("queue.bind source: %s", err)
+	}
+
+	shovel, err := chs.Consume("remote-tee", "shovel", false, false, false, false, nil)
+	if err != nil {
+		log.Fatalf("basic.consume source: %s", err)
+	}
+
+	// Setup the destination of the store and forward
+	destination, err := amqp.Dial("amqp://destination/")
+	if err != nil {
+		log.Fatalf("connection.open destination: %s", err)
+	}
+	defer destination.Close()
+
+	chd, err := destination.Channel()
+	if err != nil {
+		log.Fatalf("channel.open destination: %s", err)
+	}
+
+	if err := chd.ExchangeDeclare("logs", "topic", true, false, false, false, nil); err != nil {
+		log.Fatalf("exchange.declare destination: %s", err)
+	}
+
+	// Buffer of 1 for our single outstanding publishing
+	confirms := chd.NotifyPublish(make(chan amqp.Confirmation, 1))
+
+	if err := chd.Confirm(false); err != nil {
+		log.Fatalf("confirm.select destination: %s", err)
+	}
+
+	// Now pump the messages, one by one, a smarter implementation
+	// would batch the deliveries and use multiple ack/nacks
+	for {
+		msg, ok := <-shovel
+		if !ok {
+			log.Fatalf("source channel closed, see the reconnect example for handling this")
+		}
+
+		err = chd.Publish("logs", msg.RoutingKey, false, false, amqp.Publishing{
+			// Copy all the properties
+			ContentType:     msg.ContentType,
+			ContentEncoding: msg.ContentEncoding,
+			DeliveryMode:    msg.DeliveryMode,
+			Priority:        msg.Priority,
+			CorrelationId:   msg.CorrelationId,
+			ReplyTo:         msg.ReplyTo,
+			Expiration:      msg.Expiration,
+			MessageId:       msg.MessageId,
+			Timestamp:       msg.Timestamp,
+			Type:            msg.Type,
+			UserId:          msg.UserId,
+			AppId:           msg.AppId,
+
+			// Custom headers
+			Headers: msg.Headers,
+
+			// And the body
+			Body: msg.Body,
+		})
+
+		if err != nil {
+			msg.Nack(false, false)
+			log.Fatalf("basic.publish destination: %s", err)
+		}
+
+		// only ack the source delivery when the destination acks the publishing
+		if confirmed := <-confirms; confirmed.Ack {
+			msg.Ack(false)
+		} else {
+			msg.Nack(false, false)
+		}
+	}
+}
+
+func ExampleChannel_Consume() {
+	// Open an AMQP connection using the credentials in the URL.
+	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
+	if err != nil {
+		log.Fatalf("connection.open: %s", err)
+	}
+	defer conn.Close()
+
+	c, err := conn.Channel()
+	if err != nil {
+		log.Fatalf("channel.open: %s", err)
+	}
+
+	// We declare our topology on both the publisher and consumer to ensure they
+	// are the same. This is part of AMQP being a programmable messaging model.
+	//
+	// See the Channel.Publish example for the complementary declare.
+	err = c.ExchangeDeclare("logs", "topic", true, false, false, false, nil)
+	if err != nil {
+		log.Fatalf("exchange.declare: %s", err)
+	}
+
+	// Establish our queue topologies that we are responsible for
+	type bind struct {
+		queue string
+		key   string
+	}
+
+	bindings := []bind{
+		bind{"page", "alert"},
+		bind{"email", "info"},
+		bind{"firehose", "#"},
+	}
+
+	for _, b := range bindings {
+		_, err = c.QueueDeclare(b.queue, true, false, false, false, nil)
+		if err != nil {
+			log.Fatalf("queue.declare: %v", err)
+		}
+
+		err = c.QueueBind(b.queue, b.key, "logs", false, nil)
+		if err != nil {
+			log.Fatalf("queue.bind: %v", err)
+		}
+	}
+
+	// Set our quality of service. Since we're sharing 3 consumers on the same
+	// channel, we want at least 3 messages in flight.
+	err = c.Qos(3, 0, false)
+	if err != nil {
+		log.Fatalf("basic.qos: %v", err)
+	}
+
+	// Establish our consumers that have different responsibilities. Our first
+	// two queues do not ack the messages on the server, so they must be acked
+	// on the client.
+
+	pages, err := c.Consume("page", "pager", false, false, false, false, nil)
+	if err != nil {
+		log.Fatalf("basic.consume: %v", err)
+	}
+
+	go func() {
+		for log := range pages {
+			// ... this consumer is responsible for sending pages per log
+			log.Ack(false)
+		}
+	}()
+
+	// Notice how the concern for which messages arrive here is in the AMQP
+	// topology and not in the queue. We let the server pick a consumer tag this
+	// time.
+
+	emails, err := c.Consume("email", "", false, false, false, false, nil)
+	if err != nil {
+		log.Fatalf("basic.consume: %v", err)
+	}
+
+	go func() {
+		for log := range emails {
+			// ... this consumer is responsible for sending emails per log
+			log.Ack(false)
+		}
+	}()
+
+	// This consumer requests that every message is acknowledged as soon as it's
+	// delivered.
+
+	firehose, err := c.Consume("firehose", "", true, false, false, false, nil)
+	if err != nil {
+		log.Fatalf("basic.consume: %v", err)
+	}
+
+	// To show how to process the items in parallel, we'll use a work pool.
+	for i := 0; i < runtime.NumCPU(); i++ {
+		go func(work <-chan amqp.Delivery) {
+			for range work {
+				// ... this consumer pulls from the firehose and doesn't need to acknowledge
+			}
+		}(firehose)
+	}
+
+	// Wait until you're ready to finish, could be a signal handler here.
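+	// For instance, a sketch assuming "os" and "os/signal" were imported:
+	//
+	//   sig := make(chan os.Signal, 1)
+	//   signal.Notify(sig, os.Interrupt)
+	//   <-sig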
+	time.Sleep(10 * time.Second)
+
+	// Cancelling a consumer by name will finish the range and gracefully end the
+	// goroutine
+	err = c.Cancel("pager", false)
+	if err != nil {
+		log.Fatalf("basic.cancel: %v", err)
+	}
+
+	// Deferred closing of the Connection will also finish the consumers' ranges
+	// of their delivery chans. If you need every delivery to be processed, make
+	// sure to wait for all consumer goroutines to finish before exiting your
+	// process.
+}
+
+func ExampleChannel_Publish() {
+	// Open an AMQP connection using the credentials in the URL.
+	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
+	if err != nil {
+		log.Fatalf("connection.open: %s", err)
+	}
+
+	// This waits for a server acknowledgment which means the sockets will have
+	// flushed all outbound publishings prior to returning. It's important to
+	// block on Close to not lose any publishings.
+	defer conn.Close()
+
+	c, err := conn.Channel()
+	if err != nil {
+		log.Fatalf("channel.open: %s", err)
+	}
+
+	// We declare our topology on both the publisher and consumer to ensure they
+	// are the same. This is part of AMQP being a programmable messaging model.
+	//
+	// See the Channel.Consume example for the complementary declare.
+	err = c.ExchangeDeclare("logs", "topic", true, false, false, false, nil)
+	if err != nil {
+		log.Fatalf("exchange.declare: %v", err)
+	}
+
+	// Prepare this message to be persistent. Your publishing requirements may
+	// be different.
+	msg := amqp.Publishing{
+		DeliveryMode: amqp.Persistent,
+		Timestamp:    time.Now(),
+		ContentType:  "text/plain",
+		Body:         []byte("Go Go AMQP!"),
+	}
+
+	// This is not a mandatory delivery, so it will be dropped if there are no
+	// queues bound to the logs exchange.
+	err = c.Publish("logs", "info", false, false, msg)
+	if err != nil {
+		// Since publish is asynchronous this can happen if the network connection
+		// is reset or if the server has run out of resources.
+		log.Fatalf("basic.publish: %v", err)
+	}
+}
+
+func publishAllTheThings(conn *amqp.Connection) {
+	// ... snarf snarf, barf barf
+}
+
+func ExampleConnection_NotifyBlocked() {
+	// Simply logs when the server throttles the TCP connection for publishers
+
+	// Test this by tuning your server to have a low memory watermark:
+	// rabbitmqctl set_vm_memory_high_watermark 0.00000001
+
+	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
+	if err != nil {
+		log.Fatalf("connection.open: %s", err)
+	}
+	defer conn.Close()
+
+	blockings := conn.NotifyBlocked(make(chan amqp.Blocking))
+	go func() {
+		for b := range blockings {
+			if b.Active {
+				log.Printf("TCP blocked: %q", b.Reason)
+			} else {
+				log.Printf("TCP unblocked")
+			}
+		}
+	}()
+
+	// Your application domain channel setup publishings
+	publishAllTheThings(conn)
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/fuzz.go b/services/templeton/vendor/src/github.com/streadway/amqp/fuzz.go
new file mode 100644
index 000000000..bf7c7689b
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package amqp
+
+import "bytes"
+
+func Fuzz(data []byte) int {
+	r := reader{bytes.NewReader(data)}
+	frame, err := r.ReadFrame()
+	if err != nil {
+		if frame != nil {
+			panic("frame is not nil")
+		}
+		return 0
+	}
+	return 1
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/gen.sh b/services/templeton/vendor/src/github.com/streadway/amqp/gen.sh
new file mode 100644
index 000000000..d46e19bd8
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/gen.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+go run spec/gen.go < spec/amqp0-9-1.stripped.extended.xml | gofmt > spec091.go
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/integration_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/integration_test.go
new file mode 100644
index 000000000..ec839f221
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/integration_test.go
@@ -0,0 +1,1796 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+// +build integration
+
+package amqp
+
+import (
+	"bytes"
+	devrand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"math/rand"
+	"net"
+	"os"
+	"reflect"
+	"strconv"
+	"sync"
+	"testing"
+	"testing/quick"
+	"time"
+)
+
+func TestIntegrationOpenClose(t *testing.T) {
+	if c := integrationConnection(t, "open-close"); c != nil {
+		t.Logf("have connection, calling connection close")
+		if err := c.Close(); err != nil {
+			t.Fatalf("connection close: %s", err)
+		}
+		t.Logf("connection close OK")
+	}
+}
+
+func TestIntegrationOpenCloseChannel(t *testing.T) {
+	if c := integrationConnection(t, "channel"); c != nil {
+		defer c.Close()
+
+		if _, err := c.Channel(); err != nil {
+			t.Errorf("Channel could not be opened: %s", err)
+		}
+	}
+}
+
+func TestIntegrationOpenConfig(t *testing.T) {
+	config := Config{}
+
+	c, err := DialConfig(integrationURLFromEnv(), config)
+	if err != nil {
+		t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
+	}
+
+	if _, err := c.Channel(); err != nil {
+		t.Fatalf("expected to open channel: %s", err)
+	}
+
+	if err := c.Close(); err != nil {
+		t.Fatalf("connection close: %s", err)
+	}
+}
+
+func TestIntegrationOpenConfigWithNetDial(t *testing.T) {
+	config := Config{Dial: net.Dial}
+
+	c, err := DialConfig(integrationURLFromEnv(), config)
+	if err != nil {
+		t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
+	}
+
+	if _, err := c.Channel(); err != nil {
+		t.Fatalf("expected to open channel: %s", err)
+	}
+
+	if err := c.Close(); err != nil {
+		t.Fatalf("connection close: %s", err)
+	}
+}
+
+func TestIntegrationLocalAddr(t *testing.T) {
+	config := Config{}
+
+	c, err := DialConfig(integrationURLFromEnv(), config)
+	if err != nil {
+		t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
+	}
+	defer c.Close()
+
+	a := c.LocalAddr()
+	_, portString, err := net.SplitHostPort(a.String())
+	if err != nil {
+		t.Errorf("expected to get a local network address with config %+v integration server: %s", config, a.String())
+	}
+
+	port, err := strconv.Atoi(portString)
+	if err != nil {
+		t.Errorf("expected to get a TCP port number with config %+v integration server: %s", config, err)
+	}
+	t.Logf("Connected to port %d\n", port)
+}
+
+// https://github.com/streadway/amqp/issues/94
+func TestExchangePassiveOnMissingExchangeShouldError(t *testing.T) {
+	c := integrationConnection(t, "exch")
+	if c != nil {
+		defer c.Close()
+
+		ch, err := c.Channel()
+		if err != nil {
+			t.Fatalf("create channel: %s", err)
+		}
+		defer ch.Close()
+
+		if err := ch.ExchangeDeclarePassive(
+			"test-integration-missing-passive-exchange",
+			"direct", // type
+			false,    // durable
+			true,     // auto-delete
+			false,    // internal
+			false,    // nowait
+			nil,      // args
+		); err == nil {
+			t.Fatal("ExchangeDeclarePassive of a missing exchange should return error")
+		}
+	}
+}
+
+// https://github.com/streadway/amqp/issues/94
+func TestIntegrationExchangeDeclarePassiveOnDeclaredShouldNotError(t *testing.T) {
+	c := integrationConnection(t, "exch")
+	if c != nil {
+		defer c.Close()
+
+		exchange := "test-integration-declared-passive-exchange"
+
+		ch, err := c.Channel()
+		if err != nil {
+			t.Fatalf("create channel: %s", err)
+		}
+		defer ch.Close()
+
+		if err := ch.ExchangeDeclare(
+			exchange, // name
+			"direct", // type
+			false,    // durable
+			true,     // auto-delete
+			false,    // internal
+			false,    // nowait
+			nil,      // args
+		); err != nil {
+ t.Fatalf("declare exchange: %s", err) + } + + if err := ch.ExchangeDeclarePassive( + exchange, // name + "direct", // type + false, // durable + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("ExchangeDeclarePassive on a declared exchange should not error, got: %q", err) + } + } +} + +func TestIntegrationExchange(t *testing.T) { + c := integrationConnection(t, "exch") + if c != nil { + defer c.Close() + + channel, err := c.Channel() + if err != nil { + t.Fatalf("create channel: %s", err) + } + t.Logf("create channel OK") + + exchange := "test-integration-exchange" + + if err := channel.ExchangeDeclare( + exchange, // name + "direct", // type + false, // duration + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("declare exchange: %s", err) + } + t.Logf("declare exchange OK") + + if err := channel.ExchangeDelete(exchange, false, false); err != nil { + t.Fatalf("delete exchange: %s", err) + } + t.Logf("delete exchange OK") + + if err := channel.Close(); err != nil { + t.Fatalf("close channel: %s", err) + } + t.Logf("close channel OK") + } +} + +// https://github.com/streadway/amqp/issues/94 +func TestIntegrationQueueDeclarePassiveOnMissingExchangeShouldError(t *testing.T) { + c := integrationConnection(t, "queue") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel1: %s", err) + } + defer ch.Close() + + if _, err := ch.QueueDeclarePassive( + "test-integration-missing-passive-queue", // name + false, // duration (note: not durable) + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err == nil { + t.Fatal("QueueDeclarePassive of a missing queue should error") + } + } +} + +// https://github.com/streadway/amqp/issues/94 +func TestIntegrationPassiveQueue(t *testing.T) { + c := integrationConnection(t, "queue") + if c != nil { + defer c.Close() + + name := "test-integration-declared-passive-queue" + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel1: %s", err) + } + defer ch.Close() + + if _, err := ch.QueueDeclare( + name, // name + false, // durable + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("queue declare: %s", err) + } + + if _, err := ch.QueueDeclarePassive( + name, // name + false, // durable + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("QueueDeclarePassive on declared queue should not error, got: %q", err) + } + + if _, err := ch.QueueDeclarePassive( + name, // name + true, // durable (note: differs) + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("QueueDeclarePassive on declared queue with different flags should error") + } + } +} + +func TestIntegrationBasicQueueOperations(t *testing.T) { + c := integrationConnection(t, "queue") + if c != nil { + defer c.Close() + + channel, err := c.Channel() + if err != nil { + t.Fatalf("create channel: %s", err) + } + t.Logf("create channel OK") + + exchangeName := "test-basic-ops-exchange" + queueName := "test-basic-ops-queue" + + deleteQueueFirstOptions := []bool{true, false} + for _, deleteQueueFirst := range deleteQueueFirstOptions { + + if err := channel.ExchangeDeclare( + exchangeName, // name + "direct", // type + true, // duration (note: is durable) + false, // auto-delete + false, // internal + false, // nowait + nil, 
// args
+ ); err != nil {
+ t.Fatalf("declare exchange: %s", err)
+ }
+ t.Logf("declare exchange OK")
+
+ if _, err := channel.QueueDeclare(
+ queueName, // name
+ true, // durable
+ false, // auto-delete
+ false, // exclusive
+ false, // noWait
+ nil, // arguments
+ ); err != nil {
+ t.Fatalf("queue declare: %s", err)
+ }
+ t.Logf("declare queue OK")
+
+ if err := channel.QueueBind(
+ queueName, // name
+ "", // routingKey
+ exchangeName, // sourceExchange
+ false, // noWait
+ nil, // arguments
+ ); err != nil {
+ t.Fatalf("queue bind: %s", err)
+ }
+ t.Logf("queue bind OK")
+
+ if deleteQueueFirst {
+ if _, err := channel.QueueDelete(
+ queueName, // name
+ false, // ifUnused (false=be aggressive)
+ false, // ifEmpty (false=be aggressive)
+ false, // noWait
+ ); err != nil {
+ t.Fatalf("delete queue (first): %s", err)
+ }
+ t.Logf("delete queue (first) OK")
+
+ if err := channel.ExchangeDelete(exchangeName, false, false); err != nil {
+ t.Fatalf("delete exchange (after delete queue): %s", err)
+ }
+ t.Logf("delete exchange (after delete queue) OK")
+
+ } else { // deleteExchangeFirst
+ if err := channel.ExchangeDelete(exchangeName, false, false); err != nil {
+ t.Fatalf("delete exchange (first): %s", err)
+ }
+ t.Logf("delete exchange (first) OK")
+
+ if _, err := channel.QueueInspect(queueName); err != nil {
+ t.Fatalf("inspect queue state after deleting exchange: %s", err)
+ }
+ t.Logf("queue properly remains after exchange is deleted")
+
+ if _, err := channel.QueueDelete(
+ queueName,
+ false, // ifUnused
+ false, // ifEmpty
+ false, // noWait
+ ); err != nil {
+ t.Fatalf("delete queue (after delete exchange): %s", err)
+ }
+ t.Logf("delete queue (after delete exchange) OK")
+ }
+ }
+
+ if err := channel.Close(); err != nil {
+ t.Fatalf("close channel: %s", err)
+ }
+ t.Logf("close channel OK")
+ }
+}
+
+func TestIntegrationConnectionNegotiatesMaxChannels(t *testing.T) {
+ config := Config{ChannelMax: 0}
+
+ c, err := DialConfig(integrationURLFromEnv(), config)
+ if err != nil {
+ t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
+ }
+ defer c.Close()
+
+ if want, got := defaultChannelMax, c.Config.ChannelMax; want != got {
+ t.Fatalf("expected connection to negotiate uint16 (%d) channels, got: %d", want, got)
+ }
+}
+
+func TestIntegrationConnectionNegotiatesClientMaxChannels(t *testing.T) {
+ config := Config{ChannelMax: 16}
+
+ c, err := DialConfig(integrationURLFromEnv(), config)
+ if err != nil {
+ t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
+ }
+ defer c.Close()
+
+ if want, got := config.ChannelMax, c.Config.ChannelMax; want != got {
+ t.Fatalf("expected client specified channel limit after handshake %d, got: %d", want, got)
+ }
+}
+
+func TestIntegrationChannelIDsExhausted(t *testing.T) {
+ config := Config{ChannelMax: 16}
+
+ c, err := DialConfig(integrationURLFromEnv(), config)
+ if err != nil {
+ t.Fatalf("expected to dial with config %+v integration server: %s", config, err)
+ }
+ defer c.Close()
+
+ for i := 1; i <= c.Config.ChannelMax; i++ {
+ if _, err := c.Channel(); err != nil {
+ t.Fatalf("expected allocating all channel ids to succeed, failed on %d with %v", i, err)
+ }
+ }
+
+ if _, err := c.Channel(); err != ErrChannelMax {
+ t.Fatalf("expected allocating all channels to produce the client side error %#v, got: %#v", ErrChannelMax, err)
+ }
+}
+
+func TestIntegrationChannelClosing(t *testing.T) {
+ c := integrationConnection(t, "closings")
+ if c != nil {
+ defer c.Close()
+
+ //
This function is run on every channel after it is successfully + // opened. It can do something to verify something. It should be + // quick; many channels may be opened! + f := func(t *testing.T, c *Channel) { + return + } + + // open and close + channel, err := c.Channel() + if err != nil { + t.Fatalf("basic create channel: %s", err) + } + t.Logf("basic create channel OK") + + if err := channel.Close(); err != nil { + t.Fatalf("basic close channel: %s", err) + } + t.Logf("basic close channel OK") + + // deferred close + signal := make(chan bool) + go func() { + channel, err := c.Channel() + if err != nil { + t.Fatalf("second create channel: %s", err) + } + t.Logf("second create channel OK") + + <-signal // a bit of synchronization + f(t, channel) + + defer func() { + if err := channel.Close(); err != nil { + t.Fatalf("deferred close channel: %s", err) + } + t.Logf("deferred close channel OK") + signal <- true + }() + }() + signal <- true + select { + case <-signal: + t.Logf("(got close signal OK)") + break + case <-time.After(250 * time.Millisecond): + t.Fatalf("deferred close: timeout") + } + + // multiple channels + for _, n := range []int{2, 4, 8, 16, 32, 64, 128, 256} { + channels := make([]*Channel, n) + for i := 0; i < n; i++ { + var err error + if channels[i], err = c.Channel(); err != nil { + t.Fatalf("create channel %d/%d: %s", i+1, n, err) + } + } + f(t, channel) + for i, channel := range channels { + if err := channel.Close(); err != nil { + t.Fatalf("close channel %d/%d: %s", i+1, n, err) + } + } + t.Logf("created/closed %d channels OK", n) + } + + } +} + +func TestIntegrationMeaningfulChannelErrors(t *testing.T) { + c := integrationConnection(t, "pub") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("Could not create channel") + } + + queue := "test.integration.channel.error" + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != nil { + t.Fatalf("Could not declare") + } + + _, err = ch.QueueDeclare(queue, true, false, false, false, nil) + if err == nil { + t.Fatalf("Expected error, got nil") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("Expected type Error response, got %T", err) + } + + if e.Code != PreconditionFailed { + t.Fatalf("Expected PreconditionFailed, got: %+v", e) + } + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != ErrClosed { + t.Fatalf("Expected channel to be closed, got: %T", err) + } + } +} + +// https://github.com/streadway/amqp/issues/6 +func TestIntegrationNonBlockingClose(t *testing.T) { + c := integrationConnection(t, "#6") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("Could not create channel") + } + + queue := "test.integration.blocking.close" + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != nil { + t.Fatalf("Could not declare") + } + + msgs, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + // Simulate a consumer + go func() { + for _ = range msgs { + t.Logf("Oh my, received message on an empty queue") + } + }() + + succeed := make(chan bool) + + go func() { + if err = ch.Close(); err != nil { + t.Fatalf("Close produced an error when it shouldn't") + } + succeed <- true + }() + + select { + case <-succeed: + break + case <-time.After(1 * time.Second): + t.Fatalf("Close timed out after 1s") + } + } +} + +func TestIntegrationPublishConsume(t *testing.T) { + queue := "test.integration.publish.consume" + + 
c1 := integrationConnection(t, "pub")
+ c2 := integrationConnection(t, "sub")
+
+ if c1 != nil && c2 != nil {
+ defer c1.Close()
+ defer c2.Close()
+
+ pub, _ := c1.Channel()
+ sub, _ := c2.Channel()
+
+ pub.QueueDeclare(queue, false, true, false, false, nil)
+ sub.QueueDeclare(queue, false, true, false, false, nil)
+ defer pub.QueueDelete(queue, false, false, false)
+
+ messages, _ := sub.Consume(queue, "", false, false, false, false, nil)
+
+ pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 1")})
+ pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 2")})
+ pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 3")})
+
+ assertConsumeBody(t, messages, []byte("pub 1"))
+ assertConsumeBody(t, messages, []byte("pub 2"))
+ assertConsumeBody(t, messages, []byte("pub 3"))
+ }
+}
+
+func TestIntegrationConsumeFlow(t *testing.T) {
+ queue := "test.integration.consumer-flow"
+
+ c1 := integrationConnection(t, "pub-flow")
+ c2 := integrationConnection(t, "sub-flow")
+
+ if c1 != nil && c2 != nil {
+ defer c1.Close()
+ defer c2.Close()
+
+ pub, _ := c1.Channel()
+ sub, _ := c2.Channel()
+
+ pub.QueueDeclare(queue, false, true, false, false, nil)
+ sub.QueueDeclare(queue, false, true, false, false, nil)
+ defer pub.QueueDelete(queue, false, false, false)
+
+ sub.Qos(1, 0, false)
+
+ messages, _ := sub.Consume(queue, "", false, false, false, false, nil)
+
+ pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 1")})
+ pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 2")})
+
+ msg := assertConsumeBody(t, messages, []byte("pub 1"))
+
+ if err := sub.Flow(false); err != nil {
+ if e, ok := err.(*Error); ok && e.Code == NotImplemented {
+ t.Log("flow control is not supported on this version of rabbitmq")
+ return
+ }
+ t.Fatalf("error pausing consumer flow: %v", err)
+ }
+
+ msg.Ack(false)
+
+ select {
+ case <-messages:
+ t.Fatalf("message was delivered when flow was not active")
+ default:
+ }
+
+ sub.Flow(true)
+
+ msg = assertConsumeBody(t, messages, []byte("pub 2"))
+ msg.Ack(false)
+ }
+}
+
+func TestIntegrationRecoverNotImplemented(t *testing.T) {
+ queue := "test.recover"
+
+ if c, ch := integrationQueue(t, queue); c != nil {
+ if product, ok := c.Properties["product"]; ok && product.(string) == "RabbitMQ" {
+ defer c.Close()
+
+ err := ch.Recover(false)
+
+ if ex, ok := err.(*Error); !ok || ex.Code != 540 {
+ t.Fatalf("Expected NOT IMPLEMENTED got: %v", ex)
+ }
+ }
+ }
+}
+
+// This test is driven by a private API to simulate the server sending a channelFlow message
+func TestIntegrationPublishFlow(t *testing.T) {
+ // TODO - no idea how to test without affecting the server or mucking internal APIs
+ // i'd like to make sure the RW lock can be held by multiple publisher threads
+ // and that multiple channelFlow messages do not block the dispatch thread
+}
+
+func TestIntegrationConsumeCancel(t *testing.T) {
+ queue := "test.integration.consume-cancel"
+
+ c := integrationConnection(t, "pub")
+
+ if c != nil {
+ defer c.Close()
+
+ ch, _ := c.Channel()
+
+ ch.QueueDeclare(queue, false, true, false, false, nil)
+ defer ch.QueueDelete(queue, false, false, false)
+
+ messages, _ := ch.Consume(queue, "integration-tag", false, false, false, false, nil)
+
+ ch.Publish("", queue, false, false, Publishing{Body: []byte("1")})
+
+ assertConsumeBody(t, messages, []byte("1"))
+
+ err := ch.Cancel("integration-tag", false)
+ if err != nil {
+ t.Fatalf("error cancelling the consumer: %v", err)
+ }
+
+ ch.Publish("", queue, false, false, Publishing{Body: []byte("2")})
+
+ select {
+ case <-time.After(100 * time.Millisecond):
+
t.Fatalf("Timeout on Close") + case _, ok := <-messages: + if ok { + t.Fatalf("Extra message on consumer when consumer should have been closed") + } + } + } +} + +func (c *Connection) Generate(r *rand.Rand, _ int) reflect.Value { + urlStr := os.Getenv("AMQP_URL") + if urlStr == "" { + return reflect.ValueOf(nil) + } + + conn, err := Dial(urlStr) + if err != nil { + return reflect.ValueOf(nil) + } + + return reflect.ValueOf(conn) +} + +func (c Publishing) Generate(r *rand.Rand, _ int) reflect.Value { + var ok bool + var t reflect.Value + + p := Publishing{} + //p.DeliveryMode = uint8(r.Intn(3)) + //p.Priority = uint8(r.Intn(8)) + + if r.Intn(2) > 0 { + p.ContentType = "application/octet-stream" + } + + if r.Intn(2) > 0 { + p.ContentEncoding = "gzip" + } + + if r.Intn(2) > 0 { + p.CorrelationId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.ReplyTo = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.MessageId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.Type = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.AppId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.Timestamp = time.Unix(r.Int63(), r.Int63()) + } + + if t, ok = quick.Value(reflect.TypeOf(p.Body), r); ok { + p.Body = t.Bytes() + } + + return reflect.ValueOf(p) +} + +func TestQuickPublishOnly(t *testing.T) { + if c := integrationConnection(t, "quick"); c != nil { + defer c.Close() + pub, err := c.Channel() + queue := "test-publish" + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + defer pub.QueueDelete(queue, false, false, false) + + quick.Check(func(msg Publishing) bool { + return pub.Publish("", queue, false, false, msg) == nil + }, nil) + } +} + +func TestPublishEmptyBody(t *testing.T) { + c := integrationConnection(t, "empty") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Errorf("Failed to create channel") + return + } + + queue := "test-TestPublishEmptyBody" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Could not declare") + } + + messages, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + err = ch.Publish("", queue, false, false, Publishing{}) + if err != nil { + t.Fatalf("Could not publish") + } + + select { + case msg := <-messages: + if len(msg.Body) != 0 { + t.Errorf("Received non empty body") + } + case <-time.After(200 * time.Millisecond): + t.Errorf("Timeout on receive") + } + } +} + +func TestPublishEmptyBodyWithHeadersIssue67(t *testing.T) { + c := integrationConnection(t, "issue67") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Errorf("Failed to create channel") + return + } + + queue := "test-TestPublishEmptyBodyWithHeaders" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Could not declare") + } + + messages, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + headers := Table{ + "ham": "spam", + } + + err = ch.Publish("", queue, false, false, Publishing{Headers: headers}) + if err != nil { + t.Fatalf("Could not publish") + } + + select { + case msg := <-messages: + if msg.Headers["ham"] == nil { + t.Fatalf("Headers aren't sent") + } + if msg.Headers["ham"] != "spam" { + t.Fatalf("Headers are wrong") + } + case <-time.After(200 * time.Millisecond): + 
t.Errorf("Timeout on receive") + } + } +} + +func TestQuickPublishConsumeOnly(t *testing.T) { + c1 := integrationConnection(t, "quick-pub") + c2 := integrationConnection(t, "quick-sub") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + sub, err := c2.Channel() + + queue := "TestPublishConsumeOnly" + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + if _, err = sub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + defer sub.QueueDelete(queue, false, false, false) + + ch, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Errorf("Could not sub: %s", err) + } + + quick.CheckEqual( + func(msg Publishing) []byte { + empty := Publishing{Body: msg.Body} + if pub.Publish("", queue, false, false, empty) != nil { + return []byte{'X'} + } + return msg.Body + }, + func(msg Publishing) []byte { + out := <-ch + out.Ack(false) + return out.Body + }, + nil) + } +} + +func TestQuickPublishConsumeBigBody(t *testing.T) { + c1 := integrationConnection(t, "big-pub") + c2 := integrationConnection(t, "big-sub") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + sub, err := c2.Channel() + + queue := "test-pubsub" + + if _, err = sub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + ch, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Errorf("Could not sub: %s", err) + } + + fixture := Publishing{ + Body: make([]byte, 1e4+1000), + } + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + err = pub.Publish("", queue, false, false, fixture) + if err != nil { + t.Errorf("Could not publish big body") + } + + select { + case msg := <-ch: + if bytes.Compare(msg.Body, fixture.Body) != 0 { + t.Errorf("Consumed big body didn't match") + } + case <-time.After(200 * time.Millisecond): + t.Errorf("Timeout on receive") + } + } +} + +func TestIntegrationGetOk(t *testing.T) { + if c := integrationConnection(t, "getok"); c != nil { + defer c.Close() + + queue := "test.get-ok" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")}) + + msg, ok, err := ch.Get(queue, false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if !ok { + t.Fatalf("Get on a queued message did not find the message") + } + + if string(msg.Body) != "ok" { + t.Fatalf("Get did not get the correct message") + } + } +} + +func TestIntegrationGetEmpty(t *testing.T) { + if c := integrationConnection(t, "getok"); c != nil { + defer c.Close() + + queue := "test.get-ok" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + _, ok, err := ch.Get(queue, false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if !ok { + t.Fatalf("Get on a queued message retrieved a message when it shouldn't have") + } + } +} + +func TestIntegrationTxCommit(t *testing.T) { + if c := integrationConnection(t, "txcommit"); c != nil { + defer c.Close() + + queue := "test.tx.commit" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + if err := ch.Tx(); err != nil { + t.Fatalf("tx.select failed") + } + + ch.Publish("", 
queue, false, false, Publishing{Body: []byte("ok")}) + + if err := ch.TxCommit(); err != nil { + t.Fatalf("tx.commit failed") + } + + msg, ok, err := ch.Get(queue, false) + + if err != nil || !ok { + t.Fatalf("Failed get: %v", err) + } + + if string(msg.Body) != "ok" { + t.Fatalf("Get did not get the correct message from the transaction") + } + } +} + +func TestIntegrationTxRollback(t *testing.T) { + if c := integrationConnection(t, "txrollback"); c != nil { + defer c.Close() + + queue := "test.tx.rollback" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + if err := ch.Tx(); err != nil { + t.Fatalf("tx.select failed") + } + + ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")}) + + if err := ch.TxRollback(); err != nil { + t.Fatalf("tx.rollback failed") + } + + _, ok, err := ch.Get(queue, false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if ok { + t.Fatalf("message was published when it should have been rolled back") + } + } +} + +func TestIntegrationReturn(t *testing.T) { + if c, ch := integrationQueue(t, "return"); c != nil { + defer c.Close() + + ret := make(chan Return, 1) + + ch.NotifyReturn(ret) + + // mandatory publish to an exchange without a binding should be returned + ch.Publish("", "return-without-binding", true, false, Publishing{Body: []byte("mandatory")}) + + select { + case res := <-ret: + if string(res.Body) != "mandatory" { + t.Fatalf("expected return of the same message") + } + + if res.ReplyCode != NoRoute { + t.Fatalf("expected no consumers reply code on the Return result, got: %v", res.ReplyCode) + } + + case <-time.After(200 * time.Millisecond): + t.Fatalf("no return was received within 200ms") + } + } +} + +func TestIntegrationCancel(t *testing.T) { + queue := "cancel" + consumerTag := "test.cancel" + + if c, ch := integrationQueue(t, queue); c != nil { + defer c.Close() + + cancels := ch.NotifyCancel(make(chan string, 1)) + + go func() { + if _, err := ch.Consume(queue, consumerTag, false, false, false, false, nil); err != nil { + t.Fatalf("cannot consume from %q to test NotifyCancel: %v", queue, err) + } + if _, err := ch.QueueDelete(queue, false, false, false); err != nil { + t.Fatalf("cannot delete integration queue: %v", err) + } + }() + + select { + case tag := <-cancels: + if want, got := consumerTag, tag; want != got { + t.Fatalf("expected to be notified of deleted queue with consumer tag, got: %q", got) + } + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected to be notified of deleted queue with 200ms") + } + } +} + +func TestIntegrationConfirm(t *testing.T) { + if c, ch := integrationQueue(t, "confirm"); c != nil { + defer c.Close() + + confirms := ch.NotifyPublish(make(chan Confirmation, 1)) + + if err := ch.Confirm(false); err != nil { + t.Fatalf("could not confirm") + } + + ch.Publish("", "confirm", false, false, Publishing{Body: []byte("confirm")}) + + select { + case confirmed := <-confirms: + if confirmed.DeliveryTag != 1 { + t.Fatalf("expected ack starting with delivery tag of 1") + } + case <-time.After(200 * time.Millisecond): + t.Fatalf("no ack was received within 200ms") + } + } +} + +// https://github.com/streadway/amqp/issues/61 +func TestRoundTripAllFieldValueTypes61(t *testing.T) { + if conn := integrationConnection(t, "issue61"); conn != nil { + defer conn.Close() + timestamp := time.Unix(100000000, 0) + + headers := Table{ + "A": []interface{}{ + []interface{}{"nested array", int32(3)}, + Decimal{2, 1}, + Table{"S": "nested table in array"}, + int32(2 << 
20), + string("array string"), + timestamp, + nil, + byte(2), + float64(2.64), + float32(2.32), + int64(2 << 60), + int16(2 << 10), + bool(true), + []byte{'b', '2'}, + }, + "D": Decimal{1, 1}, + "F": Table{"S": "nested table in table"}, + "I": int32(1 << 20), + "S": string("string"), + "T": timestamp, + "V": nil, + "b": byte(1), + "d": float64(1.64), + "f": float32(1.32), + "l": int64(1 << 60), + "s": int16(1 << 10), + "t": bool(true), + "x": []byte{'b', '1'}, + } + + queue := "test.issue61-roundtrip" + ch, _ := conn.Channel() + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Could not declare") + } + + msgs, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + err = ch.Publish("", queue, false, false, Publishing{Body: []byte("ignored"), Headers: headers}) + if err != nil { + t.Fatalf("Could not publish: %v", err) + } + + msg, ok := <-msgs + + if !ok { + t.Fatalf("Channel closed prematurely likely due to publish exception") + } + + for k, v := range headers { + if !reflect.DeepEqual(v, msg.Headers[k]) { + t.Errorf("Round trip header not the same for key %q: expected: %#v, got %#v", k, v, msg.Headers[k]) + } + } + } +} + +// Declares a queue with the x-message-ttl extension to exercise integer +// serialization. +// +// Relates to https://github.com/streadway/amqp/issues/60 +// +func TestDeclareArgsXMessageTTL(t *testing.T) { + if conn := integrationConnection(t, "declareTTL"); conn != nil { + defer conn.Close() + + ch, _ := conn.Channel() + args := Table{"x-message-ttl": int32(9000000)} + + // should not drop the connection + if _, err := ch.QueueDeclare("declareWithTTL", false, true, false, false, args); err != nil { + t.Fatalf("cannot declare with TTL: got: %v", err) + } + } +} + +// Sets up the topology where rejected messages will be forwarded +// to a fanout exchange, with a single queue bound. 
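+//
+// Roughly, the dead-lettering arrangement this test builds looks like the
+// following sketch (the names are illustrative only, and errors are elided):
+//
+//	ch.ExchangeDeclare("work-dlx", "fanout", false, true, false, false, nil)
+//	ch.QueueDeclare("work-dlq", false, true, false, false, nil)
+//	ch.QueueBind("work-dlq", "#", "work-dlx", false, nil)
+//	ch.QueueDeclare("work", false, true, false, false, Table{
+//		"x-dead-letter-exchange": "work-dlx", // rejects go here
+//	})
+//	// a Reject(false) on a "work" delivery re-publishes it to work-dlx,
+//	// where it lands on work-dlq carrying an "x-death" header array.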
+// +// Relates to https://github.com/streadway/amqp/issues/56 +// +func TestDeclareArgsRejectToDeadLetterQueue(t *testing.T) { + if conn := integrationConnection(t, "declareArgs"); conn != nil { + defer conn.Close() + + ex, q := "declareArgs", "declareArgs-deliveries" + dlex, dlq := ex+"-dead-letter", q+"-dead-letter" + + ch, _ := conn.Channel() + + if err := ch.ExchangeDeclare(ex, "fanout", false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", ex, err) + } + + if err := ch.ExchangeDeclare(dlex, "fanout", false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", dlex, err) + } + + if _, err := ch.QueueDeclare(dlq, false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", dlq, err) + } + + if err := ch.QueueBind(dlq, "#", dlex, false, nil); err != nil { + t.Fatalf("cannot bind %v to %v: got: %v", dlq, dlex, err) + } + + if _, err := ch.QueueDeclare(q, false, true, false, false, Table{ + "x-dead-letter-exchange": dlex, + }); err != nil { + t.Fatalf("cannot declare %v with dlq %v: got: %v", q, dlex, err) + } + + if err := ch.QueueBind(q, "#", ex, false, nil); err != nil { + t.Fatalf("cannot bind %v: got: %v", ex, err) + } + + fails, err := ch.Consume(q, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("cannot consume %v: got: %v", q, err) + } + + // Reject everything consumed + go func() { + for d := range fails { + d.Reject(false) + } + }() + + // Publish the 'poison' + if err := ch.Publish(ex, q, true, false, Publishing{Body: []byte("ignored")}); err != nil { + t.Fatalf("publishing failed") + } + + // spin-get until message arrives on the dead-letter queue with a + // synchronous parse to exercise the array field (x-death) set by the + // server relating to issue-56 + for i := 0; i < 10; i++ { + d, got, err := ch.Get(dlq, false) + if !got && err == nil { + continue + } else if err != nil { + t.Fatalf("expected success in parsing reject, got: %v", err) + } else { + // pass if we've parsed an array + if v, ok := d.Headers["x-death"]; ok { + if _, ok := v.([]interface{}); ok { + return + } + } + t.Fatalf("array field x-death expected in the headers, got: %v (%T)", d.Headers, d.Headers["x-death"]) + } + } + + t.Fatalf("expectd dead-letter after 10 get attempts") + } +} + +// https://github.com/streadway/amqp/issues/48 +func TestDeadlockConsumerIssue48(t *testing.T) { + if conn := integrationConnection(t, "issue48"); conn != nil { + defer conn.Close() + + deadline := make(chan bool) + go func() { + select { + case <-time.After(5 * time.Second): + panic("expected to receive 2 deliveries while in an RPC, got a deadlock") + case <-deadline: + // pass + } + }() + + ch, err := conn.Channel() + if err != nil { + t.Fatalf("got error on channel.open: %v", err) + } + + queue := "test-issue48" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("expected to declare a queue: %v", err) + } + + if err := ch.Confirm(false); err != nil { + t.Fatalf("got error on confirm: %v", err) + } + + confirms := ch.NotifyPublish(make(chan Confirmation, 2)) + + for i := 0; i < cap(confirms); i++ { + // Fill the queue with some new or remaining publishings + ch.Publish("", queue, false, false, Publishing{Body: []byte("")}) + } + + for i := 0; i < cap(confirms); i++ { + // Wait for them to land on the queue so they'll be delivered on consume + <-confirms + } + + // Consuming should send them all on the wire + msgs, err := ch.Consume(queue, "", false, false, false, false, 
nil) + if err != nil { + t.Fatalf("got error on consume: %v", err) + } + + // We pop one off the chan, the other is on the wire + <-msgs + + // Opening a new channel (any RPC) while another delivery is on the wire + if _, err := conn.Channel(); err != nil { + t.Fatalf("got error on consume: %v", err) + } + + // We pop the next off the chan + <-msgs + + deadline <- true + } +} + +// https://github.com/streadway/amqp/issues/46 +func TestRepeatedChannelExceptionWithPublishAndMaxProcsIssue46(t *testing.T) { + conn := integrationConnection(t, "issue46") + if conn != nil { + for i := 0; i < 100; i++ { + ch, err := conn.Channel() + if err != nil { + t.Fatalf("expected error only on publish, got error on channel.open: %v", err) + } + + for j := 0; j < 10; j++ { + err = ch.Publish("not-existing-exchange", "some-key", false, false, Publishing{Body: []byte("some-data")}) + if err, ok := err.(Error); ok { + if err.Code != 504 { + t.Fatalf("expected channel only exception, got: %v", err) + } + } + } + } + } +} + +// https://github.com/streadway/amqp/issues/43 +func TestChannelExceptionWithCloseIssue43(t *testing.T) { + conn := integrationConnection(t, "issue43") + if conn != nil { + go func() { + for err := range conn.NotifyClose(make(chan *Error)) { + t.Log(err.Error()) + } + }() + + c1, err := conn.Channel() + if err != nil { + panic(err) + } + + go func() { + for err := range c1.NotifyClose(make(chan *Error)) { + t.Log("Channel1 Close: " + err.Error()) + } + }() + + c2, err := conn.Channel() + if err != nil { + panic(err) + } + + go func() { + for err := range c2.NotifyClose(make(chan *Error)) { + t.Log("Channel2 Close: " + err.Error()) + } + }() + + // Cause an asynchronous channel exception causing the server + // to send a "channel.close" method either before or after the next + // asynchronous method. + err = c1.Publish("nonexisting-exchange", "", false, false, Publishing{}) + if err != nil { + panic(err) + } + + // Receive or send the channel close method, the channel shuts down + // but this expects a channel.close-ok to be received. + c1.Close() + + // This ensures that the 2nd channel is unaffected by the channel exception + // on channel 1. 
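+ // A channel-level exception (such as publishing to a missing exchange)
+ // closes only the offending channel; the connection and its other
+ // channels stay usable, which is what the declare below verifies.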
+ err = c2.ExchangeDeclare("test-channel-still-exists", "direct", false, true, false, false, nil) + if err != nil { + panic(err) + } + } +} + +// https://github.com/streadway/amqp/issues/7 +func TestCorruptedMessageIssue7(t *testing.T) { + messageCount := 1024 + + c1 := integrationConnection(t, "") + c2 := integrationConnection(t, "") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + if err != nil { + t.Fatalf("Cannot create Channel") + } + + sub, err := c2.Channel() + if err != nil { + t.Fatalf("Cannot create Channel") + } + + queue := "test-corrupted-message-regression" + + if _, err := pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Cannot declare") + } + + if _, err := sub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Cannot declare") + } + + msgs, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Cannot consume") + } + + for i := 0; i < messageCount; i++ { + err := pub.Publish("", queue, false, false, Publishing{ + Body: generateCrc32Random(7 * i), + }) + + if err != nil { + t.Fatalf("Failed to publish") + } + } + + for i := 0; i < messageCount; i++ { + select { + case msg := <-msgs: + assertMessageCrc32(t, msg.Body, fmt.Sprintf("missed match at %d", i)) + case <-time.After(200 * time.Millisecond): + t.Fatalf("Timeout on recv") + } + } + } +} + +// https://github.com/streadway/amqp/issues/136 +func TestChannelCounterShouldNotPanicIssue136(t *testing.T) { + if c := integrationConnection(t, "issue136"); c != nil { + defer c.Close() + var wg sync.WaitGroup + + // exceeds 65535 channels + for i := 0; i < 8; i++ { + wg.Add(1) + go func(i int) { + for j := 0; j < 10000; j++ { + ch, err := c.Channel() + if err != nil { + t.Fatalf("failed to create channel %d:%d, got: %v", i, j, err) + } + if err := ch.Close(); err != nil { + t.Fatalf("failed to close channel %d:%d, got: %v", i, j, err) + } + } + wg.Done() + }(i) + } + wg.Wait() + } +} + +func TestExchangeDeclarePrecondition(t *testing.T) { + c1 := integrationConnection(t, "exchange-double-declare") + c2 := integrationConnection(t, "exchange-double-declare-cleanup") + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + ch, err := c1.Channel() + if err != nil { + t.Fatalf("Create channel") + } + + exchange := "test-mismatched-redeclare" + + err = ch.ExchangeDeclare( + exchange, + "direct", // exchangeType + false, // durable + true, // auto-delete + false, // internal + false, // noWait + nil, // arguments + ) + if err != nil { + t.Fatalf("Could not initially declare exchange") + } + + err = ch.ExchangeDeclare( + exchange, + "direct", + true, // different durability + true, + false, + false, + nil, + ) + + if err == nil { + t.Fatalf("Expected to fail a redeclare with different durability, didn't receive an error") + } + + if err, ok := err.(Error); ok { + if err.Code != PreconditionFailed { + t.Fatalf("Expected precondition error") + } + if !err.Recover { + t.Fatalf("Expected to be able to recover") + } + } + + ch2, _ := c2.Channel() + if err = ch2.ExchangeDelete(exchange, false, false); err != nil { + t.Fatalf("Could not delete exchange: %v", err) + } + } +} + +func TestRabbitMQQueueTTLGet(t *testing.T) { + if c := integrationRabbitMQ(t, "ttl"); c != nil { + defer c.Close() + + queue := "test.rabbitmq-message-ttl" + channel, err := c.Channel() + if err != nil { + t.Fatalf("channel: %v", err) + } + + if _, err = channel.QueueDeclare( + queue, + false, + true, + false, 
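+ // durable, autoDelete, exclusive above; noWait below. The x-message-ttl
+ // argument is a RabbitMQ extension carried in the arguments table: the
+ // broker drops (or dead-letters) any message that stays queued longer
+ // than the given number of milliseconds.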
+ false,
+ Table{"x-message-ttl": int32(100)}, // in ms
+ ); err != nil {
+ t.Fatalf("queue declare: %s", err)
+ }
+
+ channel.Publish("", queue, false, false, Publishing{Body: []byte("ttl")})
+
+ time.Sleep(200 * time.Millisecond)
+
+ _, ok, err := channel.Get(queue, false)
+
+ if ok {
+ t.Fatalf("Expected the message to expire in 100ms, it didn't expire after 200ms")
+ }
+
+ if err != nil {
+ t.Fatalf("Failed to get on ttl queue")
+ }
+ }
+}
+
+func TestRabbitMQQueueNackMultipleRequeue(t *testing.T) {
+ if c := integrationRabbitMQ(t, "nack"); c != nil {
+ defer c.Close()
+
+ if c.isCapable("basic.nack") {
+ queue := "test.rabbitmq-basic-nack"
+ channel, err := c.Channel()
+ if err != nil {
+ t.Fatalf("channel: %v", err)
+ }
+
+ if _, err = channel.QueueDeclare(queue, false, true, false, false, nil); err != nil {
+ t.Fatalf("queue declare: %s", err)
+ }
+
+ channel.Publish("", queue, false, false, Publishing{Body: []byte("1")})
+ channel.Publish("", queue, false, false, Publishing{Body: []byte("2")})
+
+ m1, ok, err := channel.Get(queue, false)
+ if !ok || err != nil || m1.Body[0] != '1' {
+ t.Fatalf("could not get message %v", m1)
+ }
+
+ m2, ok, err := channel.Get(queue, false)
+ if !ok || err != nil || m2.Body[0] != '2' {
+ t.Fatalf("could not get message %v", m2)
+ }
+
+ m2.Nack(true, true)
+
+ m1, ok, err = channel.Get(queue, false)
+ if !ok || err != nil || m1.Body[0] != '1' {
+ t.Fatalf("could not get message %v", m1)
+ }
+
+ m2, ok, err = channel.Get(queue, false)
+ if !ok || err != nil || m2.Body[0] != '2' {
+ t.Fatalf("could not get message %v", m2)
+ }
+ }
+ }
+}
+
+/*
+ * Support for integration tests
+ */
+
+func integrationURLFromEnv() string {
+ url := os.Getenv("AMQP_URL")
+ if url == "" {
+ url = "amqp://"
+ }
+ return url
+}
+
+func loggedConnection(t *testing.T, conn *Connection, name string) *Connection {
+ if name != "" {
+ conn.conn = &logIO{t, name, conn.conn}
+ }
+ return conn
+}
+
+// Returns a connection to the AMQP server if the AMQP_URL environment
+// variable is set and a connection can be established.
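+//
+// The integration suite is opt-in: it only compiles with the "integration"
+// build tag and reads the broker URL from AMQP_URL. A typical invocation
+// looks like this (the URL is just an example, not a built-in default):
+//
+//	AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ go test -tags integration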
+func integrationConnection(t *testing.T, name string) *Connection {
+ conn, err := Dial(integrationURLFromEnv())
+ if err != nil {
+ t.Errorf("dial integration server: %s", err)
+ return nil
+ }
+ return loggedConnection(t, conn, name)
+}
+
+// Returns a connection and channel, and declares a queue, when AMQP_URL is in the environment
+func integrationQueue(t *testing.T, name string) (*Connection, *Channel) {
+ if conn := integrationConnection(t, name); conn != nil {
+ if channel, err := conn.Channel(); err == nil {
+ if _, err = channel.QueueDeclare(name, false, true, false, false, nil); err == nil {
+ return conn, channel
+ }
+ }
+ }
+ return nil, nil
+}
+
+// Delegates to integrationConnection and only returns a connection if the
+// product is RabbitMQ
+func integrationRabbitMQ(t *testing.T, name string) *Connection {
+ if conn := integrationConnection(t, name); conn != nil {
+ if server, ok := conn.Properties["product"]; ok && server == "RabbitMQ" {
+ return conn
+ }
+ }
+
+ return nil
+}
+
+func assertConsumeBody(t *testing.T, messages <-chan Delivery, want []byte) (msg *Delivery) {
+ select {
+ case got := <-messages:
+ if bytes.Compare(want, got.Body) != 0 {
+ t.Fatalf("Message body does not match want: %v, got: %v, for: %+v", want, got.Body, got)
+ }
+ msg = &got
+ case <-time.After(200 * time.Millisecond):
+ t.Fatalf("Timeout waiting for %v", want)
+ }
+
+ return msg
+}
+
+// Pulls out the CRC and verifies the remaining content against the CRC
+func assertMessageCrc32(t *testing.T, msg []byte, assert string) {
+ size := binary.BigEndian.Uint32(msg[:4])
+
+ crc := crc32.NewIEEE()
+ crc.Write(msg[8:])
+
+ if binary.BigEndian.Uint32(msg[4:8]) != crc.Sum32() {
+ t.Fatalf("Message does not match CRC: %s", assert)
+ }
+
+ if int(size) != len(msg)-8 {
+ t.Fatalf("Message does not match size, should=%d, is=%d: %s", size, len(msg)-8, assert)
+ }
+}
+
+// Creates a random body of the given size with a leading 32-bit CRC in network
+// byte order that verifies the remaining slice
+func generateCrc32Random(size int) []byte {
+ msg := make([]byte, size+8)
+ if _, err := io.ReadFull(devrand.Reader, msg); err != nil {
+ panic(err)
+ }
+
+ crc := crc32.NewIEEE()
+ crc.Write(msg[8:])
+
+ binary.BigEndian.PutUint32(msg[0:4], uint32(size))
+ binary.BigEndian.PutUint32(msg[4:8], crc.Sum32())
+
+ return msg
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/read.go b/services/templeton/vendor/src/github.com/streadway/amqp/read.go
new file mode 100644
index 000000000..74e90ef8f
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/read.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+package amqp
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "time"
+)
+
+/*
+Reads a frame from an input stream and returns an interface that can be cast into
+one of the following:
+
+ methodFrame
+ PropertiesFrame
+ bodyFrame
+ heartbeatFrame
+
+2.3.5 frame Details
+
+All frames consist of a header (7 octets), a payload of arbitrary size, and a
+'frame-end' octet that detects malformed frames:
+
+ 0 1 3 7 size+7 size+8
+ +------+---------+-------------+ +------------+ +-----------+
+ | type | channel | size | | payload | | frame-end |
+ +------+---------+-------------+ +------------+ +-----------+
+ octet short long size octets octet
+
+To read a frame, we:
+ 1.
Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or + +“gathering reads” to avoid doing three separate system calls to read a frame. +*/ +func (me *reader) ReadFrame() (frame frame, err error) { + var scratch [7]byte + + if _, err = io.ReadFull(me.r, scratch[:7]); err != nil { + return + } + + typ := uint8(scratch[0]) + channel := binary.BigEndian.Uint16(scratch[1:3]) + size := binary.BigEndian.Uint32(scratch[3:7]) + + switch typ { + case frameMethod: + if frame, err = me.parseMethodFrame(channel, size); err != nil { + return + } + + case frameHeader: + if frame, err = me.parseHeaderFrame(channel, size); err != nil { + return + } + + case frameBody: + if frame, err = me.parseBodyFrame(channel, size); err != nil { + return nil, err + } + + case frameHeartbeat: + if frame, err = me.parseHeartbeatFrame(channel, size); err != nil { + return + } + + default: + return nil, ErrFrame + } + + if _, err = io.ReadFull(me.r, scratch[:1]); err != nil { + return nil, err + } + + if scratch[0] != frameEnd { + return nil, ErrFrame + } + + return +} + +func readShortstr(r io.Reader) (v string, err error) { + var length uint8 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readLongstr(r io.Reader) (v string, err error) { + var length uint32 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readDecimal(r io.Reader) (v Decimal, err error) { + if err = binary.Read(r, binary.BigEndian, &v.Scale); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &v.Value); err != nil { + return + } + return +} + +func readFloat32(r io.Reader) (v float32, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readFloat64(r io.Reader) (v float64, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readTimestamp(r io.Reader) (v time.Time, err error) { + var sec int64 + if err = binary.Read(r, binary.BigEndian, &sec); err != nil { + return + } + return time.Unix(sec, 0), nil +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func readField(r io.Reader) (v interface{}, err error) { + var typ byte + if err = binary.Read(r, binary.BigEndian, &typ); err != nil { + return + } + + switch typ { + case 't': + var value uint8 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return (value != 0), nil + + case 'b': + var value [1]byte + if _, err = io.ReadFull(r, value[0:1]); err != nil { + return + } + return value[0], nil + + case 's': + var value int16 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'I': + var value int32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'l': + var value int64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + 
+ case 'f': + var value float32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'd': + var value float64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'D': + return readDecimal(r) + + case 'S': + return readLongstr(r) + + case 'A': + return readArray(r) + + case 'T': + return readTimestamp(r) + + case 'F': + return readTable(r) + + case 'x': + var len int32 + if err = binary.Read(r, binary.BigEndian, &len); err != nil { + return nil, err + } + + value := make([]byte, len) + if _, err = io.ReadFull(r, value); err != nil { + return nil, err + } + return value, err + + case 'V': + return nil, nil + } + + return nil, ErrSyntax +} + +/* + Field tables are long strings that contain packed name-value pairs. The + name-value pairs are encoded as short string defining the name, and octet + defining the values type and then the value itself. The valid field types for + tables are an extension of the native integer, bit, string, and timestamp + types, and are shown in the grammar. Multi-octet integer fields are always + held in network byte order. +*/ +func readTable(r io.Reader) (table Table, err error) { + var nested bytes.Buffer + var str string + + if str, err = readLongstr(r); err != nil { + return + } + + nested.Write([]byte(str)) + + table = make(Table) + + for nested.Len() > 0 { + var key string + var value interface{} + + if key, err = readShortstr(&nested); err != nil { + return + } + + if value, err = readField(&nested); err != nil { + return + } + + table[key] = value + } + + return +} + +func readArray(r io.Reader) ([]interface{}, error) { + var size uint32 + var err error + + if err = binary.Read(r, binary.BigEndian, &size); err != nil { + return nil, err + } + + lim := &io.LimitedReader{R: r, N: int64(size)} + arr := make([]interface{}, 0) + var field interface{} + + for { + if field, err = readField(lim); err != nil { + if err == io.EOF { + break + } + return nil, err + } + arr = append(arr, field) + } + + return arr, nil +} + +// Checks if this bit mask matches the flags bitset +func hasProperty(mask uint16, prop int) bool { + return int(mask)&prop > 0 +} + +func (me *reader) parseHeaderFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &headerFrame{ + ChannelId: channel, + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.ClassId); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.weight); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.Size); err != nil { + return + } + + var flags uint16 + + if err = binary.Read(me.r, binary.BigEndian, &flags); err != nil { + return + } + + if hasProperty(flags, flagContentType) { + if hf.Properties.ContentType, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagContentEncoding) { + if hf.Properties.ContentEncoding, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagHeaders) { + if hf.Properties.Headers, err = readTable(me.r); err != nil { + return + } + } + if hasProperty(flags, flagDeliveryMode) { + if err = binary.Read(me.r, binary.BigEndian, &hf.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(flags, flagPriority) { + if err = binary.Read(me.r, binary.BigEndian, &hf.Properties.Priority); err != nil { + return + } + } + if hasProperty(flags, flagCorrelationId) { + if hf.Properties.CorrelationId, err = readShortstr(me.r); err != nil { + return + } + } + 
if hasProperty(flags, flagReplyTo) { + if hf.Properties.ReplyTo, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagExpiration) { + if hf.Properties.Expiration, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagMessageId) { + if hf.Properties.MessageId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagTimestamp) { + if hf.Properties.Timestamp, err = readTimestamp(me.r); err != nil { + return + } + } + if hasProperty(flags, flagType) { + if hf.Properties.Type, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagUserId) { + if hf.Properties.UserId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagAppId) { + if hf.Properties.AppId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagReserved1) { + if hf.Properties.reserved1, err = readShortstr(me.r); err != nil { + return + } + } + + return hf, nil +} + +func (me *reader) parseBodyFrame(channel uint16, size uint32) (frame frame, err error) { + bf := &bodyFrame{ + ChannelId: channel, + Body: make([]byte, size), + } + + if _, err = io.ReadFull(me.r, bf.Body); err != nil { + return nil, err + } + + return bf, nil +} + +var errHeartbeatPayload = errors.New("Heartbeats should not have a payload") + +func (me *reader) parseHeartbeatFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &heartbeatFrame{ + ChannelId: channel, + } + + if size > 0 { + return nil, errHeartbeatPayload + } + + return hf, nil +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/read_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/read_test.go new file mode 100644 index 000000000..bb0e30f02 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/read_test.go @@ -0,0 +1,22 @@ +package amqp + +import ( + "strings" + "testing" +) + +func TestGoFuzzCrashers(t *testing.T) { + testData := []string{ + "\b000000", + "\x02\x16\x10�[��\t\xbdui�" + "\x10\x01\x00\xff\xbf\xef\xbfサn\x99\x00\x10r", + "\x0300\x00\x00\x00\x040000", + } + + for idx, testStr := range testData { + r := reader{strings.NewReader(testStr)} + frame, err := r.ReadFrame() + if err != nil && frame != nil { + t.Errorf("%d. frame is not nil: %#v err = %v", idx, frame, err) + } + } +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/reconnect_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/reconnect_test.go new file mode 100644 index 000000000..5a06cb7ae --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/reconnect_test.go @@ -0,0 +1,113 @@ +package amqp_test + +import ( + "fmt" + "github.com/streadway/amqp" + "os" +) + +// Every connection should declare the topology they expect +func setup(url, queue string) (*amqp.Connection, *amqp.Channel, error) { + conn, err := amqp.Dial(url) + if err != nil { + return nil, nil, err + } + + ch, err := conn.Channel() + if err != nil { + return nil, nil, err + } + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + return nil, nil, err + } + + return conn, ch, nil +} + +func consume(url, queue string) (*amqp.Connection, <-chan amqp.Delivery, error) { + conn, ch, err := setup(url, queue) + if err != nil { + return nil, nil, err + } + + // Indicate we only want 1 message to acknowledge at a time. 
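+ // Qos(prefetchCount, prefetchSize, global) caps how many deliveries the
+ // server will push before an ack is returned; with a prefetch count of 1,
+ // at most one unacknowledged message is in flight, so a dropped connection
+ // re-queues at most one delivery for the next consumer.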
+ if err := ch.Qos(1, 0, false); err != nil { + return nil, nil, err + } + + // Exclusive consumer + deliveries, err := ch.Consume(queue, "", false, true, false, false, nil) + + return conn, deliveries, err +} + +func ExampleConnection_reconnect() { + if url := os.Getenv("AMQP_URL"); url != "" { + queue := "example.reconnect" + + // The connection/channel for publishing to interleave the ingress messages + // between reconnects, shares the same topology as the consumer. If we rather + // sent all messages up front, the first consumer would receive every message. + // We would rather show how the messages are not lost between reconnects. + _, pub, err := setup(url, queue) + if err != nil { + fmt.Println("err publisher setup:", err) + return + } + + // Purge the queue from the publisher side to establish initial state + if _, err := pub.QueuePurge(queue, false); err != nil { + fmt.Println("err purge:", err) + return + } + + // Reconnect simulation, should be for { ... } in production + for i := 1; i <= 3; i++ { + fmt.Println("connect") + + conn, deliveries, err := consume(url, queue) + if err != nil { + fmt.Println("err consume:", err) + return + } + + // Simulate a producer on a different connection showing that consumers + // continue where they were left off after each reconnect. + if err := pub.Publish("", queue, false, false, amqp.Publishing{ + Body: []byte(fmt.Sprintf("%d", i)), + }); err != nil { + fmt.Println("err publish:", err) + return + } + + // Simulates a consumer that when the range finishes, will setup a new + // session and begin ranging over the deliveries again. + for msg := range deliveries { + fmt.Println(string(msg.Body)) + msg.Ack(false) + + // Simulate an error like a server restart, loss of route or operator + // intervention that results in the connection terminating + go conn.Close() + } + } + } else { + // pass with expected output when not running in an integration + // environment. + fmt.Println("connect") + fmt.Println("1") + fmt.Println("connect") + fmt.Println("2") + fmt.Println("connect") + fmt.Println("3") + } + + // Output: + // connect + // 1 + // connect + // 2 + // connect + // 3 +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/return.go b/services/templeton/vendor/src/github.com/streadway/amqp/return.go new file mode 100644 index 000000000..dfebd635d --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/return.go @@ -0,0 +1,64 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "time" +) + +// Return captures a flattened struct of fields returned by the server when a +// Publishing is unable to be delivered either due to the `mandatory` flag set +// and no route found, or `immediate` flag set and no free consumer. 
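+//
+// A hedged usage sketch (the queue name is made up): register the listener
+// before publishing with mandatory set, then drain the returns channel:
+//
+//	returns := ch.NotifyReturn(make(chan Return, 1))
+//	ch.Publish("", "no-such-queue", true, false, Publishing{Body: []byte("hi")})
+//	go func() {
+//		for r := range returns {
+//			log.Printf("returned %d %s: %q", r.ReplyCode, r.ReplyText, r.Body)
+//		}
+//	}()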
+type Return struct {
+ ReplyCode uint16 // reason
+ ReplyText string // description
+ Exchange string // basic.publish exchange
+ RoutingKey string // basic.publish routing key
+
+ // Properties
+ ContentType string // MIME content type
+ ContentEncoding string // MIME content encoding
+ Headers Table // Application or header exchange table
+ DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2)
+ Priority uint8 // queue implementation use - 0 to 9
+ CorrelationId string // application use - correlation identifier
+ ReplyTo string // application use - address to reply to (ex: RPC)
+ Expiration string // implementation use - message expiration spec
+ MessageId string // application use - message identifier
+ Timestamp time.Time // application use - message timestamp
+ Type string // application use - message type name
+ UserId string // application use - creating user id
+ AppId string // application use - creating application
+
+ Body []byte
+}
+
+func newReturn(msg basicReturn) *Return {
+ props, body := msg.getContent()
+
+ return &Return{
+ ReplyCode: msg.ReplyCode,
+ ReplyText: msg.ReplyText,
+ Exchange: msg.Exchange,
+ RoutingKey: msg.RoutingKey,
+
+ Headers: props.Headers,
+ ContentType: props.ContentType,
+ ContentEncoding: props.ContentEncoding,
+ DeliveryMode: props.DeliveryMode,
+ Priority: props.Priority,
+ CorrelationId: props.CorrelationId,
+ ReplyTo: props.ReplyTo,
+ Expiration: props.Expiration,
+ MessageId: props.MessageId,
+ Timestamp: props.Timestamp,
+ Type: props.Type,
+ UserId: props.UserId,
+ AppId: props.AppId,
+
+ Body: body,
+ }
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/shared_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/shared_test.go
new file mode 100644
index 000000000..2e4715fa0
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/shared_test.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+package amqp
+
+import (
+	"encoding/hex"
+	"io"
+	"testing"
+)
+
+// pipe joins a PipeReader and a PipeWriter into one in-memory
+// io.ReadWriteCloser, used to wire a fake client and server together in tests.
+type pipe struct {
+	r *io.PipeReader
+	w *io.PipeWriter
+}
+
+func (p pipe) Read(b []byte) (int, error) {
+	return p.r.Read(b)
+}
+
+func (p pipe) Write(b []byte) (int, error) {
+	return p.w.Write(b)
+}
+
+func (p pipe) Close() error {
+	p.r.Close()
+	p.w.Close()
+	return nil
+}
+
+// logIO proxies an io.ReadWriteCloser and hex-dumps every read and write to
+// the test log, prefixed so the two directions of traffic can be told apart.
+type logIO struct {
+	t      *testing.T
+	prefix string
+	proxy  io.ReadWriteCloser
+}
+
+func (me *logIO) Read(p []byte) (n int, err error) {
+	me.t.Logf("%s reading %d\n", me.prefix, len(p))
+	n, err = me.proxy.Read(p)
+	if err != nil {
+		me.t.Logf("%s read %x: %v\n", me.prefix, p[0:n], err)
+	} else {
+		me.t.Logf("%s read:\n%s\n", me.prefix, hex.Dump(p[0:n]))
+	}
+	return
+}
+
+func (me *logIO) Write(p []byte) (n int, err error) {
+	me.t.Logf("%s writing %d\n", me.prefix, len(p))
+	n, err = me.proxy.Write(p)
+	if err != nil {
+		me.t.Logf("%s write %d, %x: %v\n", me.prefix, len(p), p[0:n], err)
+	} else {
+		me.t.Logf("%s write %d:\n%s", me.prefix, len(p), hex.Dump(p[0:n]))
+	}
+	return
+}
+
+func (me *logIO) Close() (err error) {
+	err = me.proxy.Close()
+	if err != nil {
+		me.t.Logf("%s close : %v\n", me.prefix, err)
+	} else {
+		me.t.Logf("%s close\n", me.prefix)
+	}
+	return
+}
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml b/services/templeton/vendor/src/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml
new file mode 100644
index 000000000..fbddb93a3
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml
@@ -0,0 +1,537 @@
+<!-- Machine-readable AMQP 0-9-1 spec with the RabbitMQ extensions, consumed
+     by spec/gen.go. The XML element markup did not survive extraction; only
+     this errata note is recoverable: -->
+    Errata: Section 1.2 ought to define an exception 312 "No route", which used to
+    exist in 0-9 and is what RabbitMQ sends back with 'basic.return' when a
+    'mandatory' message cannot be delivered to any queue.
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/spec/gen.go b/services/templeton/vendor/src/github.com/streadway/amqp/spec/gen.go
new file mode 100644
index 000000000..1861b9ebb
--- /dev/null
+++ b/services/templeton/vendor/src/github.com/streadway/amqp/spec/gen.go
@@ -0,0 +1,536 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp + +// +build ignore + +package main + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "regexp" + "strings" + "text/template" +) + +var ( + ErrUnknownType = errors.New("Unknown field type in gen") + ErrUnknownDomain = errors.New("Unknown domain type in gen") +) + +var amqpTypeToNative = map[string]string{ + "bit": "bool", + "octet": "byte", + "shortshort": "uint8", + "short": "uint16", + "long": "uint32", + "longlong": "uint64", + "timestamp": "time.Time", + "table": "Table", + "shortstr": "string", + "longstr": "string", +} + +type Rule struct { + Name string `xml:"name,attr"` + Docs []string `xml:"doc"` +} + +type Doc struct { + Type string `xml:"type,attr"` + Body string `xml:",innerxml"` +} + +type Chassis struct { + Name string `xml:"name,attr"` + Implement string `xml:"implement,attr"` +} + +type Assert struct { + Check string `xml:"check,attr"` + Value string `xml:"value,attr"` + Method string `xml:"method,attr"` +} + +type Field struct { + Name string `xml:"name,attr"` + Domain string `xml:"domain,attr"` + Type string `xml:"type,attr"` + Label string `xml:"label,attr"` + Reserved bool `xml:"reserved,attr"` + Docs []Doc `xml:"doc"` + Asserts []Assert `xml:"assert"` +} + +type Response struct { + Name string `xml:"name,attr"` +} + +type Method struct { + Name string `xml:"name,attr"` + Response Response `xml:"response"` + Synchronous bool `xml:"synchronous,attr"` + Content bool `xml:"content,attr"` + Index string `xml:"index,attr"` + Label string `xml:"label,attr"` + Docs []Doc `xml:"doc"` + Rules []Rule `xml:"rule"` + Fields []Field `xml:"field"` + Chassis []Chassis `xml:"chassis"` +} + +type Class struct { + Name string `xml:"name,attr"` + Handler string `xml:"handler,attr"` + Index string `xml:"index,attr"` + Label string `xml:"label,attr"` + Docs []Doc `xml:"doc"` + Methods []Method `xml:"method"` + Chassis []Chassis `xml:"chassis"` +} + +type Domain struct { + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + Label string `xml:"label,attr"` + Rules []Rule `xml:"rule"` + Docs []Doc `xml:"doc"` +} + +type Constant struct { + Name string `xml:"name,attr"` + Value int `xml:"value,attr"` + Class string `xml:"class,attr"` + Doc string `xml:"doc"` +} + +type Amqp struct { + Major int `xml:"major,attr"` + Minor int `xml:"minor,attr"` + Port int `xml:"port,attr"` + Comment string `xml:"comment,attr"` + + Constants []Constant `xml:"constant"` + Domains []Domain `xml:"domain"` + Classes []Class `xml:"class"` +} + +type renderer struct { + Root Amqp + bitcounter int +} + +type fieldset struct { + AmqpType string + NativeType string + Fields []Field + *renderer +} + +var ( + helpers = template.FuncMap{ + "public": public, + "private": private, + "clean": clean, + } + + packageTemplate = template.Must(template.New("package").Funcs(helpers).Parse(` + // Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + // Source code and contact info at http://github.com/streadway/amqp + + /* GENERATED FILE - DO NOT EDIT */ + /* Rebuild from the spec/gen.go tool */ + + {{with .Root}} + package amqp + + import ( + "fmt" + "encoding/binary" + "io" + ) + + // Error codes that can be sent from the server during a connection or + // channel exception or used by the client to indicate a class of error like + // ErrCredentials. 
The text of the error is likely more interesting than + // these constants. + const ( + {{range $c := .Constants}} + {{if $c.IsError}}{{.Name | public}}{{else}}{{.Name | private}}{{end}} = {{.Value}}{{end}} + ) + + func isSoftExceptionCode(code int) bool { + switch code { + {{range $c := .Constants}} {{if $c.IsSoftError}} case {{$c.Value}}: + return true + {{end}}{{end}} + } + return false + } + + {{range .Classes}} + {{$class := .}} + {{range .Methods}} + {{$method := .}} + {{$struct := $.StructName $class.Name $method.Name}} + {{if .Docs}}/* {{range .Docs}} {{.Body | clean}} {{end}} */{{end}} + type {{$struct}} struct { + {{range .Fields}} + {{$.FieldName .}} {{$.FieldType . | $.NativeType}} {{if .Label}}// {{.Label}}{{end}}{{end}} + {{if .Content}}Properties properties + Body []byte{{end}} + } + + func (me *{{$struct}}) id() (uint16, uint16) { + return {{$class.Index}}, {{$method.Index}} + } + + func (me *{{$struct}}) wait() (bool) { + return {{.Synchronous}}{{if $.HasField "NoWait" .}} && !me.NoWait{{end}} + } + + {{if .Content}} + func (me *{{$struct}}) getContent() (properties, []byte) { + return me.Properties, me.Body + } + + func (me *{{$struct}}) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body + } + {{end}} + func (me *{{$struct}}) write(w io.Writer) (err error) { + {{if $.HasType "bit" $method}}var bits byte{{end}} + {{.Fields | $.Fieldsets | $.Partial "enc-"}} + return + } + + func (me *{{$struct}}) read(r io.Reader) (err error) { + {{if $.HasType "bit" $method}}var bits byte{{end}} + {{.Fields | $.Fieldsets | $.Partial "dec-"}} + return + } + {{end}} + {{end}} + + func (me *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame { + ChannelId: channel, + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.ClassId); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + {{range .Classes}} + {{$class := .}} + case {{.Index}}: // {{.Name}} + switch mf.MethodId { + {{range .Methods}} + case {{.Index}}: // {{$class.Name}} {{.Name}} + //fmt.Println("NextMethod: class:{{$class.Index}} method:{{.Index}}") + method := &{{$.StructName $class.Name .Name}}{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + {{end}} + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + {{end}} + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil + } + {{end}} + + {{define "enc-bit"}} + {{range $off, $field := .Fields}} + if me.{{$field | $.FieldName}} { bits |= 1 << {{$off}} } + {{end}} + if err = binary.Write(w, binary.BigEndian, bits); err != nil { return } + {{end}} + {{define "enc-octet"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-shortshort"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-short"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-long"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-longlong"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. 
| $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-timestamp"}} + {{range .Fields}} if err = writeTimestamp(w, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-shortstr"}} + {{range .Fields}} if err = writeShortstr(w, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-longstr"}} + {{range .Fields}} if err = writeLongstr(w, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-table"}} + {{range .Fields}} if err = writeTable(w, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + + {{define "dec-bit"}} + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + {{range $off, $field := .Fields}} me.{{$field | $.FieldName}} = (bits & (1 << {{$off}}) > 0) + {{end}} + {{end}} + {{define "dec-octet"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-shortshort"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-short"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-long"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-longlong"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-timestamp"}} + {{range .Fields}} if me.{{. | $.FieldName}}, err = readTimestamp(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-shortstr"}} + {{range .Fields}} if me.{{. | $.FieldName}}, err = readShortstr(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-longstr"}} + {{range .Fields}} if me.{{. | $.FieldName}}, err = readLongstr(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-table"}} + {{range .Fields}} if me.{{. | $.FieldName}}, err = readTable(r); err != nil { return } + {{end}} + {{end}} + + `)) +) + +func (me *Constant) IsError() bool { + return strings.Contains(me.Class, "error") +} + +func (me *Constant) IsSoftError() bool { + return me.Class == "soft-error" +} + +func (me *renderer) Partial(prefix string, fields []fieldset) (s string, err error) { + var buf bytes.Buffer + for _, set := range fields { + name := prefix + set.AmqpType + t := packageTemplate.Lookup(name) + if t == nil { + return "", errors.New(fmt.Sprintf("Missing template: %s", name)) + } + if err = t.Execute(&buf, set); err != nil { + return + } + } + return string(buf.Bytes()), nil +} + +// Groups the fields so that the right encoder/decoder can be called +func (me *renderer) Fieldsets(fields []Field) (f []fieldset, err error) { + if len(fields) > 0 { + for _, field := range fields { + cur := fieldset{} + cur.AmqpType, err = me.FieldType(field) + if err != nil { + return + } + + cur.NativeType, err = me.NativeType(cur.AmqpType) + if err != nil { + return + } + cur.Fields = append(cur.Fields, field) + f = append(f, cur) + } + + i, j := 0, 1 + for j < len(f) { + if f[i].AmqpType == f[j].AmqpType { + f[i].Fields = append(f[i].Fields, f[j].Fields...) 
+ } else { + i++ + f[i] = f[j] + } + j++ + } + return f[:i+1], nil + } + + return +} + +func (me *renderer) HasType(typ string, method Method) bool { + for _, f := range method.Fields { + name, _ := me.FieldType(f) + if name == typ { + return true + } + } + return false +} + +func (me *renderer) HasField(field string, method Method) bool { + for _, f := range method.Fields { + name := me.FieldName(f) + if name == field { + return true + } + } + return false +} + +func (me *renderer) Domain(field Field) (domain Domain, err error) { + for _, domain = range me.Root.Domains { + if field.Domain == domain.Name { + return + } + } + return domain, nil + //return domain, ErrUnknownDomain +} + +func (me *renderer) FieldName(field Field) (t string) { + t = public(field.Name) + + if field.Reserved { + t = strings.ToLower(t) + } + + return +} + +func (me *renderer) FieldType(field Field) (t string, err error) { + t = field.Type + + if t == "" { + var domain Domain + domain, err = me.Domain(field) + if err != nil { + return "", err + } + t = domain.Type + } + + return +} + +func (me *renderer) NativeType(amqpType string) (t string, err error) { + if t, ok := amqpTypeToNative[amqpType]; ok { + return t, nil + } + return "", ErrUnknownType +} + +func (me *renderer) Tag(d Domain) string { + label := "`" + + label += `domain:"` + d.Name + `"` + + if len(d.Type) > 0 { + label += `,type:"` + d.Type + `"` + } + + label += "`" + + return label +} + +func (me *renderer) StructName(parts ...string) string { + return parts[0] + public(parts[1:]...) +} + +func clean(body string) (res string) { + return strings.Replace(body, "\r", "", -1) +} + +func private(parts ...string) string { + return export(regexp.MustCompile(`[-_]\w`), parts...) +} + +func public(parts ...string) string { + return export(regexp.MustCompile(`^\w|[-_]\w`), parts...) +} + +func export(delim *regexp.Regexp, parts ...string) (res string) { + for _, in := range parts { + + res += delim.ReplaceAllStringFunc(in, func(match string) string { + switch len(match) { + case 1: + return strings.ToUpper(match) + case 2: + return strings.ToUpper(match[1:]) + } + panic("unreachable") + }) + } + + return +} + +func main() { + var r renderer + + spec, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Fatalln("Please pass spec on stdin", err) + } + + err = xml.Unmarshal(spec, &r.Root) + + if err != nil { + log.Fatalln("Could not parse XML:", err) + } + + if err = packageTemplate.Execute(os.Stdout, &r); err != nil { + log.Fatalln("Generate error: ", err) + } +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/spec091.go b/services/templeton/vendor/src/github.com/streadway/amqp/spec091.go new file mode 100644 index 000000000..a95380303 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/spec091.go @@ -0,0 +1,3306 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* GENERATED FILE - DO NOT EDIT */ +/* Rebuild from the spec/gen.go tool */ + +package amqp + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Error codes that can be sent from the server during a connection or +// channel exception or used by the client to indicate a class of error like +// ErrCredentials. The text of the error is likely more interesting than +// these constants. 
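+//
+// This file is rebuilt by feeding the spec XML to spec/gen.go, which reads
+// the spec on stdin and writes the generated package to stdout; presumably
+// invoked along the lines of:
+//
+//	go run spec/gen.go < spec/amqp0-9-1.stripped.extended.xml > spec091.go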
+const ( + frameMethod = 1 + frameHeader = 2 + frameBody = 3 + frameHeartbeat = 8 + frameMinSize = 4096 + frameEnd = 206 + replySuccess = 200 + ContentTooLarge = 311 + NoRoute = 312 + NoConsumers = 313 + ConnectionForced = 320 + InvalidPath = 402 + AccessRefused = 403 + NotFound = 404 + ResourceLocked = 405 + PreconditionFailed = 406 + FrameError = 501 + SyntaxError = 502 + CommandInvalid = 503 + ChannelError = 504 + UnexpectedFrame = 505 + ResourceError = 506 + NotAllowed = 530 + NotImplemented = 540 + InternalError = 541 +) + +func isSoftExceptionCode(code int) bool { + switch code { + case 311: + return true + case 312: + return true + case 313: + return true + case 403: + return true + case 404: + return true + case 405: + return true + case 406: + return true + + } + return false +} + +type connectionStart struct { + VersionMajor byte + VersionMinor byte + ServerProperties Table + Mechanisms string + Locales string +} + +func (me *connectionStart) id() (uint16, uint16) { + return 10, 10 +} + +func (me *connectionStart) wait() bool { + return true +} + +func (me *connectionStart) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.VersionMajor); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.VersionMinor); err != nil { + return + } + + if err = writeTable(w, me.ServerProperties); err != nil { + return + } + + if err = writeLongstr(w, me.Mechanisms); err != nil { + return + } + if err = writeLongstr(w, me.Locales); err != nil { + return + } + + return +} + +func (me *connectionStart) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.VersionMajor); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &me.VersionMinor); err != nil { + return + } + + if me.ServerProperties, err = readTable(r); err != nil { + return + } + + if me.Mechanisms, err = readLongstr(r); err != nil { + return + } + if me.Locales, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionStartOk struct { + ClientProperties Table + Mechanism string + Response string + Locale string +} + +func (me *connectionStartOk) id() (uint16, uint16) { + return 10, 11 +} + +func (me *connectionStartOk) wait() bool { + return true +} + +func (me *connectionStartOk) write(w io.Writer) (err error) { + + if err = writeTable(w, me.ClientProperties); err != nil { + return + } + + if err = writeShortstr(w, me.Mechanism); err != nil { + return + } + + if err = writeLongstr(w, me.Response); err != nil { + return + } + + if err = writeShortstr(w, me.Locale); err != nil { + return + } + + return +} + +func (me *connectionStartOk) read(r io.Reader) (err error) { + + if me.ClientProperties, err = readTable(r); err != nil { + return + } + + if me.Mechanism, err = readShortstr(r); err != nil { + return + } + + if me.Response, err = readLongstr(r); err != nil { + return + } + + if me.Locale, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionSecure struct { + Challenge string +} + +func (me *connectionSecure) id() (uint16, uint16) { + return 10, 20 +} + +func (me *connectionSecure) wait() bool { + return true +} + +func (me *connectionSecure) write(w io.Writer) (err error) { + + if err = writeLongstr(w, me.Challenge); err != nil { + return + } + + return +} + +func (me *connectionSecure) read(r io.Reader) (err error) { + + if me.Challenge, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionSecureOk struct { + Response string +} + +func (me *connectionSecureOk) 
id() (uint16, uint16) { + return 10, 21 +} + +func (me *connectionSecureOk) wait() bool { + return true +} + +func (me *connectionSecureOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, me.Response); err != nil { + return + } + + return +} + +func (me *connectionSecureOk) read(r io.Reader) (err error) { + + if me.Response, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionTune struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (me *connectionTune) id() (uint16, uint16) { + return 10, 30 +} + +func (me *connectionTune) wait() bool { + return true +} + +func (me *connectionTune) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.Heartbeat); err != nil { + return + } + + return +} + +func (me *connectionTune) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.Heartbeat); err != nil { + return + } + + return +} + +type connectionTuneOk struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (me *connectionTuneOk) id() (uint16, uint16) { + return 10, 31 +} + +func (me *connectionTuneOk) wait() bool { + return true +} + +func (me *connectionTuneOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.Heartbeat); err != nil { + return + } + + return +} + +func (me *connectionTuneOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.Heartbeat); err != nil { + return + } + + return +} + +type connectionOpen struct { + VirtualHost string + reserved1 string + reserved2 bool +} + +func (me *connectionOpen) id() (uint16, uint16) { + return 10, 40 +} + +func (me *connectionOpen) wait() bool { + return true +} + +func (me *connectionOpen) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, me.VirtualHost); err != nil { + return + } + if err = writeShortstr(w, me.reserved1); err != nil { + return + } + + if me.reserved2 { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *connectionOpen) read(r io.Reader) (err error) { + var bits byte + + if me.VirtualHost, err = readShortstr(r); err != nil { + return + } + if me.reserved1, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.reserved2 = (bits&(1<<0) > 0) + + return +} + +type connectionOpenOk struct { + reserved1 string +} + +func (me *connectionOpenOk) id() (uint16, uint16) { + return 10, 41 +} + +func (me *connectionOpenOk) wait() bool { + return true +} + +func (me *connectionOpenOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.reserved1); err != nil { + return + } + + return +} + +func (me *connectionOpenOk) read(r io.Reader) (err error) { + + 
if me.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (me *connectionClose) id() (uint16, uint16) { + return 10, 50 +} + +func (me *connectionClose) wait() bool { + return true +} + +func (me *connectionClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, me.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.MethodId); err != nil { + return + } + + return +} + +func (me *connectionClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil { + return + } + + if me.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &me.MethodId); err != nil { + return + } + + return +} + +type connectionCloseOk struct { +} + +func (me *connectionCloseOk) id() (uint16, uint16) { + return 10, 51 +} + +func (me *connectionCloseOk) wait() bool { + return true +} + +func (me *connectionCloseOk) write(w io.Writer) (err error) { + + return +} + +func (me *connectionCloseOk) read(r io.Reader) (err error) { + + return +} + +type connectionBlocked struct { + Reason string +} + +func (me *connectionBlocked) id() (uint16, uint16) { + return 10, 60 +} + +func (me *connectionBlocked) wait() bool { + return false +} + +func (me *connectionBlocked) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.Reason); err != nil { + return + } + + return +} + +func (me *connectionBlocked) read(r io.Reader) (err error) { + + if me.Reason, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionUnblocked struct { +} + +func (me *connectionUnblocked) id() (uint16, uint16) { + return 10, 61 +} + +func (me *connectionUnblocked) wait() bool { + return false +} + +func (me *connectionUnblocked) write(w io.Writer) (err error) { + + return +} + +func (me *connectionUnblocked) read(r io.Reader) (err error) { + + return +} + +type channelOpen struct { + reserved1 string +} + +func (me *channelOpen) id() (uint16, uint16) { + return 20, 10 +} + +func (me *channelOpen) wait() bool { + return true +} + +func (me *channelOpen) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.reserved1); err != nil { + return + } + + return +} + +func (me *channelOpen) read(r io.Reader) (err error) { + + if me.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type channelOpenOk struct { + reserved1 string +} + +func (me *channelOpenOk) id() (uint16, uint16) { + return 20, 11 +} + +func (me *channelOpenOk) wait() bool { + return true +} + +func (me *channelOpenOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, me.reserved1); err != nil { + return + } + + return +} + +func (me *channelOpenOk) read(r io.Reader) (err error) { + + if me.reserved1, err = readLongstr(r); err != nil { + return + } + + return +} + +type channelFlow struct { + Active bool +} + +func (me *channelFlow) id() (uint16, uint16) { + return 20, 20 +} + +func (me *channelFlow) wait() bool { + return true +} + +func (me *channelFlow) write(w io.Writer) (err error) { + var bits byte + + if me.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, 
binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *channelFlow) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Active = (bits&(1<<0) > 0) + + return +} + +type channelFlowOk struct { + Active bool +} + +func (me *channelFlowOk) id() (uint16, uint16) { + return 20, 21 +} + +func (me *channelFlowOk) wait() bool { + return false +} + +func (me *channelFlowOk) write(w io.Writer) (err error) { + var bits byte + + if me.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *channelFlowOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Active = (bits&(1<<0) > 0) + + return +} + +type channelClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (me *channelClose) id() (uint16, uint16) { + return 20, 40 +} + +func (me *channelClose) wait() bool { + return true +} + +func (me *channelClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, me.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.MethodId); err != nil { + return + } + + return +} + +func (me *channelClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil { + return + } + + if me.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &me.MethodId); err != nil { + return + } + + return +} + +type channelCloseOk struct { +} + +func (me *channelCloseOk) id() (uint16, uint16) { + return 20, 41 +} + +func (me *channelCloseOk) wait() bool { + return true +} + +func (me *channelCloseOk) write(w io.Writer) (err error) { + + return +} + +func (me *channelCloseOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDeclare struct { + reserved1 uint16 + Exchange string + Type string + Passive bool + Durable bool + AutoDelete bool + Internal bool + NoWait bool + Arguments Table +} + +func (me *exchangeDeclare) id() (uint16, uint16) { + return 40, 10 +} + +func (me *exchangeDeclare) wait() bool { + return true && !me.NoWait +} + +func (me *exchangeDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.Type); err != nil { + return + } + + if me.Passive { + bits |= 1 << 0 + } + + if me.Durable { + bits |= 1 << 1 + } + + if me.AutoDelete { + bits |= 1 << 2 + } + + if me.Internal { + bits |= 1 << 3 + } + + if me.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *exchangeDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.Type, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, 
binary.BigEndian, &bits); err != nil { + return + } + me.Passive = (bits&(1<<0) > 0) + me.Durable = (bits&(1<<1) > 0) + me.AutoDelete = (bits&(1<<2) > 0) + me.Internal = (bits&(1<<3) > 0) + me.NoWait = (bits&(1<<4) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeDeclareOk struct { +} + +func (me *exchangeDeclareOk) id() (uint16, uint16) { + return 40, 11 +} + +func (me *exchangeDeclareOk) wait() bool { + return true +} + +func (me *exchangeDeclareOk) write(w io.Writer) (err error) { + + return +} + +func (me *exchangeDeclareOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDelete struct { + reserved1 uint16 + Exchange string + IfUnused bool + NoWait bool +} + +func (me *exchangeDelete) id() (uint16, uint16) { + return 40, 20 +} + +func (me *exchangeDelete) wait() bool { + return true && !me.NoWait +} + +func (me *exchangeDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + + if me.IfUnused { + bits |= 1 << 0 + } + + if me.NoWait { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *exchangeDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.IfUnused = (bits&(1<<0) > 0) + me.NoWait = (bits&(1<<1) > 0) + + return +} + +type exchangeDeleteOk struct { +} + +func (me *exchangeDeleteOk) id() (uint16, uint16) { + return 40, 21 +} + +func (me *exchangeDeleteOk) wait() bool { + return true +} + +func (me *exchangeDeleteOk) write(w io.Writer) (err error) { + + return +} + +func (me *exchangeDeleteOk) read(r io.Reader) (err error) { + + return +} + +type exchangeBind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (me *exchangeBind) id() (uint16, uint16) { + return 40, 30 +} + +func (me *exchangeBind) wait() bool { + return true && !me.NoWait +} + +func (me *exchangeBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Destination); err != nil { + return + } + if err = writeShortstr(w, me.Source); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *exchangeBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Destination, err = readShortstr(r); err != nil { + return + } + if me.Source, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeBindOk struct { +} + +func (me *exchangeBindOk) id() (uint16, uint16) { + return 40, 31 +} + +func (me 
*exchangeBindOk) wait() bool { + return true +} + +func (me *exchangeBindOk) write(w io.Writer) (err error) { + + return +} + +func (me *exchangeBindOk) read(r io.Reader) (err error) { + + return +} + +type exchangeUnbind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (me *exchangeUnbind) id() (uint16, uint16) { + return 40, 40 +} + +func (me *exchangeUnbind) wait() bool { + return true && !me.NoWait +} + +func (me *exchangeUnbind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Destination); err != nil { + return + } + if err = writeShortstr(w, me.Source); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *exchangeUnbind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Destination, err = readShortstr(r); err != nil { + return + } + if me.Source, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeUnbindOk struct { +} + +func (me *exchangeUnbindOk) id() (uint16, uint16) { + return 40, 51 +} + +func (me *exchangeUnbindOk) wait() bool { + return true +} + +func (me *exchangeUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (me *exchangeUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queueDeclare struct { + reserved1 uint16 + Queue string + Passive bool + Durable bool + Exclusive bool + AutoDelete bool + NoWait bool + Arguments Table +} + +func (me *queueDeclare) id() (uint16, uint16) { + return 50, 10 +} + +func (me *queueDeclare) wait() bool { + return true && !me.NoWait +} + +func (me *queueDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if me.Passive { + bits |= 1 << 0 + } + + if me.Durable { + bits |= 1 << 1 + } + + if me.Exclusive { + bits |= 1 << 2 + } + + if me.AutoDelete { + bits |= 1 << 3 + } + + if me.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *queueDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Passive = (bits&(1<<0) > 0) + me.Durable = (bits&(1<<1) > 0) + me.Exclusive = (bits&(1<<2) > 0) + me.AutoDelete = (bits&(1<<3) > 0) + me.NoWait = (bits&(1<<4) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueDeclareOk struct { + Queue string + MessageCount uint32 + ConsumerCount uint32 +} + +func (me *queueDeclareOk) id() 
(uint16, uint16) { + return 50, 11 +} + +func (me *queueDeclareOk) wait() bool { + return true +} + +func (me *queueDeclareOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.ConsumerCount); err != nil { + return + } + + return +} + +func (me *queueDeclareOk) read(r io.Reader) (err error) { + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &me.ConsumerCount); err != nil { + return + } + + return +} + +type queueBind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + NoWait bool + Arguments Table +} + +func (me *queueBind) id() (uint16, uint16) { + return 50, 20 +} + +func (me *queueBind) wait() bool { + return true && !me.NoWait +} + +func (me *queueBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *queueBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueBindOk struct { +} + +func (me *queueBindOk) id() (uint16, uint16) { + return 50, 21 +} + +func (me *queueBindOk) wait() bool { + return true +} + +func (me *queueBindOk) write(w io.Writer) (err error) { + + return +} + +func (me *queueBindOk) read(r io.Reader) (err error) { + + return +} + +type queueUnbind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + Arguments Table +} + +func (me *queueUnbind) id() (uint16, uint16) { + return 50, 50 +} + +func (me *queueUnbind) wait() bool { + return true +} + +func (me *queueUnbind) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *queueUnbind) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + 
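+
+// A note on the bit-flag encoding used throughout this file (explanatory
+// comment, not generator output): AMQP packs consecutive bit-typed fields
+// into a single octet, so e.g. queueDeclare.write above folds its five bools
+// into one byte and queueDeclare.read unpacks them again:
+//
+//	bits := byte(0)
+//	if passive {
+//		bits |= 1 << 0 // encode: set bit 0
+//	}
+//	if durable {
+//		bits |= 1 << 1 // encode: set bit 1
+//	}
+//	passive = bits&(1<<0) > 0 // decode: test bit 0
+//	durable = bits&(1<<1) > 0 // decode: test bit 1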
+type queueUnbindOk struct { +} + +func (me *queueUnbindOk) id() (uint16, uint16) { + return 50, 51 +} + +func (me *queueUnbindOk) wait() bool { + return true +} + +func (me *queueUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (me *queueUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queuePurge struct { + reserved1 uint16 + Queue string + NoWait bool +} + +func (me *queuePurge) id() (uint16, uint16) { + return 50, 30 +} + +func (me *queuePurge) wait() bool { + return true && !me.NoWait +} + +func (me *queuePurge) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *queuePurge) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + return +} + +type queuePurgeOk struct { + MessageCount uint32 +} + +func (me *queuePurgeOk) id() (uint16, uint16) { + return 50, 31 +} + +func (me *queuePurgeOk) wait() bool { + return true +} + +func (me *queuePurgeOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil { + return + } + + return +} + +func (me *queuePurgeOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil { + return + } + + return +} + +type queueDelete struct { + reserved1 uint16 + Queue string + IfUnused bool + IfEmpty bool + NoWait bool +} + +func (me *queueDelete) id() (uint16, uint16) { + return 50, 40 +} + +func (me *queueDelete) wait() bool { + return true && !me.NoWait +} + +func (me *queueDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if me.IfUnused { + bits |= 1 << 0 + } + + if me.IfEmpty { + bits |= 1 << 1 + } + + if me.NoWait { + bits |= 1 << 2 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *queueDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.IfUnused = (bits&(1<<0) > 0) + me.IfEmpty = (bits&(1<<1) > 0) + me.NoWait = (bits&(1<<2) > 0) + + return +} + +type queueDeleteOk struct { + MessageCount uint32 +} + +func (me *queueDeleteOk) id() (uint16, uint16) { + return 50, 41 +} + +func (me *queueDeleteOk) wait() bool { + return true +} + +func (me *queueDeleteOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil { + return + } + + return +} + +func (me *queueDeleteOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil { + return + } + + return +} + +type basicQos struct { + PrefetchSize uint32 + PrefetchCount uint16 + Global bool +} + +func (me *basicQos) id() (uint16, uint16) { + return 60, 10 +} + +func 
(me *basicQos) wait() bool { + return true +} + +func (me *basicQos) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.PrefetchSize); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.PrefetchCount); err != nil { + return + } + + if me.Global { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicQos) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.PrefetchSize); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.PrefetchCount); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Global = (bits&(1<<0) > 0) + + return +} + +type basicQosOk struct { +} + +func (me *basicQosOk) id() (uint16, uint16) { + return 60, 11 +} + +func (me *basicQosOk) wait() bool { + return true +} + +func (me *basicQosOk) write(w io.Writer) (err error) { + + return +} + +func (me *basicQosOk) read(r io.Reader) (err error) { + + return +} + +type basicConsume struct { + reserved1 uint16 + Queue string + ConsumerTag string + NoLocal bool + NoAck bool + Exclusive bool + NoWait bool + Arguments Table +} + +func (me *basicConsume) id() (uint16, uint16) { + return 60, 20 +} + +func (me *basicConsume) wait() bool { + return true && !me.NoWait +} + +func (me *basicConsume) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + if me.NoLocal { + bits |= 1 << 0 + } + + if me.NoAck { + bits |= 1 << 1 + } + + if me.Exclusive { + bits |= 1 << 2 + } + + if me.NoWait { + bits |= 1 << 3 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *basicConsume) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoLocal = (bits&(1<<0) > 0) + me.NoAck = (bits&(1<<1) > 0) + me.Exclusive = (bits&(1<<2) > 0) + me.NoWait = (bits&(1<<3) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type basicConsumeOk struct { + ConsumerTag string +} + +func (me *basicConsumeOk) id() (uint16, uint16) { + return 60, 21 +} + +func (me *basicConsumeOk) wait() bool { + return true +} + +func (me *basicConsumeOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + return +} + +func (me *basicConsumeOk) read(r io.Reader) (err error) { + + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicCancel struct { + ConsumerTag string + NoWait bool +} + +func (me *basicCancel) id() (uint16, uint16) { + return 60, 30 +} + +func (me *basicCancel) wait() bool { + return true && !me.NoWait +} + +func (me *basicCancel) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, 
binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicCancel) read(r io.Reader) (err error) { + var bits byte + + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + return +} + +type basicCancelOk struct { + ConsumerTag string +} + +func (me *basicCancelOk) id() (uint16, uint16) { + return 60, 31 +} + +func (me *basicCancelOk) wait() bool { + return true +} + +func (me *basicCancelOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + return +} + +func (me *basicCancelOk) read(r io.Reader) (err error) { + + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicPublish struct { + reserved1 uint16 + Exchange string + RoutingKey string + Mandatory bool + Immediate bool + Properties properties + Body []byte +} + +func (me *basicPublish) id() (uint16, uint16) { + return 60, 40 +} + +func (me *basicPublish) wait() bool { + return false +} + +func (me *basicPublish) getContent() (properties, []byte) { + return me.Properties, me.Body +} + +func (me *basicPublish) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body +} + +func (me *basicPublish) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if me.Mandatory { + bits |= 1 << 0 + } + + if me.Immediate { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicPublish) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Mandatory = (bits&(1<<0) > 0) + me.Immediate = (bits&(1<<1) > 0) + + return +} + +type basicReturn struct { + ReplyCode uint16 + ReplyText string + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (me *basicReturn) id() (uint16, uint16) { + return 60, 50 +} + +func (me *basicReturn) wait() bool { + return false +} + +func (me *basicReturn) getContent() (properties, []byte) { + return me.Properties, me.Body +} + +func (me *basicReturn) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body +} + +func (me *basicReturn) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, me.ReplyText); err != nil { + return + } + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + return +} + +func (me *basicReturn) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil { + return + } + + if me.ReplyText, err = readShortstr(r); err != nil { + return + } + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicDeliver struct { + ConsumerTag string + DeliveryTag 
uint64 + Redelivered bool + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (me *basicDeliver) id() (uint16, uint16) { + return 60, 60 +} + +func (me *basicDeliver) wait() bool { + return false +} + +func (me *basicDeliver) getContent() (properties, []byte) { + return me.Properties, me.Body +} + +func (me *basicDeliver) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body +} + +func (me *basicDeliver) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + return +} + +func (me *basicDeliver) read(r io.Reader) (err error) { + var bits byte + + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Redelivered = (bits&(1<<0) > 0) + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicGet struct { + reserved1 uint16 + Queue string + NoAck bool +} + +func (me *basicGet) id() (uint16, uint16) { + return 60, 70 +} + +func (me *basicGet) wait() bool { + return true +} + +func (me *basicGet) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if me.NoAck { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicGet) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoAck = (bits&(1<<0) > 0) + + return +} + +type basicGetOk struct { + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + MessageCount uint32 + Properties properties + Body []byte +} + +func (me *basicGetOk) id() (uint16, uint16) { + return 60, 71 +} + +func (me *basicGetOk) wait() bool { + return true +} + +func (me *basicGetOk) getContent() (properties, []byte) { + return me.Properties, me.Body +} + +func (me *basicGetOk) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body +} + +func (me *basicGetOk) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil { + return + } + + return +} + +func (me *basicGetOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != 
nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Redelivered = (bits&(1<<0) > 0) + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil { + return + } + + return +} + +type basicGetEmpty struct { + reserved1 string +} + +func (me *basicGetEmpty) id() (uint16, uint16) { + return 60, 72 +} + +func (me *basicGetEmpty) wait() bool { + return true +} + +func (me *basicGetEmpty) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.reserved1); err != nil { + return + } + + return +} + +func (me *basicGetEmpty) read(r io.Reader) (err error) { + + if me.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicAck struct { + DeliveryTag uint64 + Multiple bool +} + +func (me *basicAck) id() (uint16, uint16) { + return 60, 80 +} + +func (me *basicAck) wait() bool { + return false +} + +func (me *basicAck) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Multiple { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicAck) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Multiple = (bits&(1<<0) > 0) + + return +} + +type basicReject struct { + DeliveryTag uint64 + Requeue bool +} + +func (me *basicReject) id() (uint16, uint16) { + return 60, 90 +} + +func (me *basicReject) wait() bool { + return false +} + +func (me *basicReject) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicReject) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverAsync struct { + Requeue bool +} + +func (me *basicRecoverAsync) id() (uint16, uint16) { + return 60, 100 +} + +func (me *basicRecoverAsync) wait() bool { + return false +} + +func (me *basicRecoverAsync) write(w io.Writer) (err error) { + var bits byte + + if me.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicRecoverAsync) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecover struct { + Requeue bool +} + +func (me *basicRecover) id() (uint16, uint16) { + return 60, 110 +} + +func (me *basicRecover) wait() bool { + return true +} + +func (me *basicRecover) write(w io.Writer) (err error) { + var bits byte + + if me.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicRecover) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != 
nil { + return + } + me.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverOk struct { +} + +func (me *basicRecoverOk) id() (uint16, uint16) { + return 60, 111 +} + +func (me *basicRecoverOk) wait() bool { + return true +} + +func (me *basicRecoverOk) write(w io.Writer) (err error) { + + return +} + +func (me *basicRecoverOk) read(r io.Reader) (err error) { + + return +} + +type basicNack struct { + DeliveryTag uint64 + Multiple bool + Requeue bool +} + +func (me *basicNack) id() (uint16, uint16) { + return 60, 120 +} + +func (me *basicNack) wait() bool { + return false +} + +func (me *basicNack) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Multiple { + bits |= 1 << 0 + } + + if me.Requeue { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicNack) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Multiple = (bits&(1<<0) > 0) + me.Requeue = (bits&(1<<1) > 0) + + return +} + +type txSelect struct { +} + +func (me *txSelect) id() (uint16, uint16) { + return 90, 10 +} + +func (me *txSelect) wait() bool { + return true +} + +func (me *txSelect) write(w io.Writer) (err error) { + + return +} + +func (me *txSelect) read(r io.Reader) (err error) { + + return +} + +type txSelectOk struct { +} + +func (me *txSelectOk) id() (uint16, uint16) { + return 90, 11 +} + +func (me *txSelectOk) wait() bool { + return true +} + +func (me *txSelectOk) write(w io.Writer) (err error) { + + return +} + +func (me *txSelectOk) read(r io.Reader) (err error) { + + return +} + +type txCommit struct { +} + +func (me *txCommit) id() (uint16, uint16) { + return 90, 20 +} + +func (me *txCommit) wait() bool { + return true +} + +func (me *txCommit) write(w io.Writer) (err error) { + + return +} + +func (me *txCommit) read(r io.Reader) (err error) { + + return +} + +type txCommitOk struct { +} + +func (me *txCommitOk) id() (uint16, uint16) { + return 90, 21 +} + +func (me *txCommitOk) wait() bool { + return true +} + +func (me *txCommitOk) write(w io.Writer) (err error) { + + return +} + +func (me *txCommitOk) read(r io.Reader) (err error) { + + return +} + +type txRollback struct { +} + +func (me *txRollback) id() (uint16, uint16) { + return 90, 30 +} + +func (me *txRollback) wait() bool { + return true +} + +func (me *txRollback) write(w io.Writer) (err error) { + + return +} + +func (me *txRollback) read(r io.Reader) (err error) { + + return +} + +type txRollbackOk struct { +} + +func (me *txRollbackOk) id() (uint16, uint16) { + return 90, 31 +} + +func (me *txRollbackOk) wait() bool { + return true +} + +func (me *txRollbackOk) write(w io.Writer) (err error) { + + return +} + +func (me *txRollbackOk) read(r io.Reader) (err error) { + + return +} + +type confirmSelect struct { + Nowait bool +} + +func (me *confirmSelect) id() (uint16, uint16) { + return 85, 10 +} + +func (me *confirmSelect) wait() bool { + return true +} + +func (me *confirmSelect) write(w io.Writer) (err error) { + var bits byte + + if me.Nowait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *confirmSelect) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err 
!= nil { + return + } + me.Nowait = (bits&(1<<0) > 0) + + return +} + +type confirmSelectOk struct { +} + +func (me *confirmSelectOk) id() (uint16, uint16) { + return 85, 11 +} + +func (me *confirmSelectOk) wait() bool { + return true +} + +func (me *confirmSelectOk) write(w io.Writer) (err error) { + + return +} + +func (me *confirmSelectOk) read(r io.Reader) (err error) { + + return +} + +func (me *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame{ + ChannelId: channel, + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.ClassId); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + + case 10: // connection + switch mf.MethodId { + + case 10: // connection start + //fmt.Println("NextMethod: class:10 method:10") + method := &connectionStart{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // connection start-ok + //fmt.Println("NextMethod: class:10 method:11") + method := &connectionStartOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // connection secure + //fmt.Println("NextMethod: class:10 method:20") + method := &connectionSecure{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // connection secure-ok + //fmt.Println("NextMethod: class:10 method:21") + method := &connectionSecureOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // connection tune + //fmt.Println("NextMethod: class:10 method:30") + method := &connectionTune{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // connection tune-ok + //fmt.Println("NextMethod: class:10 method:31") + method := &connectionTuneOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // connection open + //fmt.Println("NextMethod: class:10 method:40") + method := &connectionOpen{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // connection open-ok + //fmt.Println("NextMethod: class:10 method:41") + method := &connectionOpenOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // connection close + //fmt.Println("NextMethod: class:10 method:50") + method := &connectionClose{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 51: // connection close-ok + //fmt.Println("NextMethod: class:10 method:51") + method := &connectionCloseOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 60: // connection blocked + //fmt.Println("NextMethod: class:10 method:60") + method := &connectionBlocked{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 61: // connection unblocked + //fmt.Println("NextMethod: class:10 method:61") + method := &connectionUnblocked{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 20: // channel + switch mf.MethodId { + + case 10: // channel open + //fmt.Println("NextMethod: class:20 method:10") + method := &channelOpen{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // channel open-ok + //fmt.Println("NextMethod: class:20 method:11") + method := 
&channelOpenOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // channel flow + //fmt.Println("NextMethod: class:20 method:20") + method := &channelFlow{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // channel flow-ok + //fmt.Println("NextMethod: class:20 method:21") + method := &channelFlowOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // channel close + //fmt.Println("NextMethod: class:20 method:40") + method := &channelClose{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // channel close-ok + //fmt.Println("NextMethod: class:20 method:41") + method := &channelCloseOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 40: // exchange + switch mf.MethodId { + + case 10: // exchange declare + //fmt.Println("NextMethod: class:40 method:10") + method := &exchangeDeclare{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // exchange declare-ok + //fmt.Println("NextMethod: class:40 method:11") + method := &exchangeDeclareOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // exchange delete + //fmt.Println("NextMethod: class:40 method:20") + method := &exchangeDelete{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // exchange delete-ok + //fmt.Println("NextMethod: class:40 method:21") + method := &exchangeDeleteOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // exchange bind + //fmt.Println("NextMethod: class:40 method:30") + method := &exchangeBind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // exchange bind-ok + //fmt.Println("NextMethod: class:40 method:31") + method := &exchangeBindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // exchange unbind + //fmt.Println("NextMethod: class:40 method:40") + method := &exchangeUnbind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 51: // exchange unbind-ok + //fmt.Println("NextMethod: class:40 method:51") + method := &exchangeUnbindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 50: // queue + switch mf.MethodId { + + case 10: // queue declare + //fmt.Println("NextMethod: class:50 method:10") + method := &queueDeclare{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // queue declare-ok + //fmt.Println("NextMethod: class:50 method:11") + method := &queueDeclareOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // queue bind + //fmt.Println("NextMethod: class:50 method:20") + method := &queueBind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // queue bind-ok + //fmt.Println("NextMethod: class:50 method:21") + method := &queueBindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // queue unbind + //fmt.Println("NextMethod: class:50 method:50") + method := &queueUnbind{} + if err = 
method.read(me.r); err != nil { + return + } + mf.Method = method + + case 51: // queue unbind-ok + //fmt.Println("NextMethod: class:50 method:51") + method := &queueUnbindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // queue purge + //fmt.Println("NextMethod: class:50 method:30") + method := &queuePurge{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // queue purge-ok + //fmt.Println("NextMethod: class:50 method:31") + method := &queuePurgeOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // queue delete + //fmt.Println("NextMethod: class:50 method:40") + method := &queueDelete{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // queue delete-ok + //fmt.Println("NextMethod: class:50 method:41") + method := &queueDeleteOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 60: // basic + switch mf.MethodId { + + case 10: // basic qos + //fmt.Println("NextMethod: class:60 method:10") + method := &basicQos{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // basic qos-ok + //fmt.Println("NextMethod: class:60 method:11") + method := &basicQosOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // basic consume + //fmt.Println("NextMethod: class:60 method:20") + method := &basicConsume{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // basic consume-ok + //fmt.Println("NextMethod: class:60 method:21") + method := &basicConsumeOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // basic cancel + //fmt.Println("NextMethod: class:60 method:30") + method := &basicCancel{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // basic cancel-ok + //fmt.Println("NextMethod: class:60 method:31") + method := &basicCancelOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // basic publish + //fmt.Println("NextMethod: class:60 method:40") + method := &basicPublish{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // basic return + //fmt.Println("NextMethod: class:60 method:50") + method := &basicReturn{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 60: // basic deliver + //fmt.Println("NextMethod: class:60 method:60") + method := &basicDeliver{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 70: // basic get + //fmt.Println("NextMethod: class:60 method:70") + method := &basicGet{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 71: // basic get-ok + //fmt.Println("NextMethod: class:60 method:71") + method := &basicGetOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 72: // basic get-empty + //fmt.Println("NextMethod: class:60 method:72") + method := &basicGetEmpty{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 80: // basic ack + //fmt.Println("NextMethod: class:60 method:80") + method := &basicAck{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 90: // 
basic reject + //fmt.Println("NextMethod: class:60 method:90") + method := &basicReject{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 100: // basic recover-async + //fmt.Println("NextMethod: class:60 method:100") + method := &basicRecoverAsync{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 110: // basic recover + //fmt.Println("NextMethod: class:60 method:110") + method := &basicRecover{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 111: // basic recover-ok + //fmt.Println("NextMethod: class:60 method:111") + method := &basicRecoverOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 120: // basic nack + //fmt.Println("NextMethod: class:60 method:120") + method := &basicNack{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 90: // tx + switch mf.MethodId { + + case 10: // tx select + //fmt.Println("NextMethod: class:90 method:10") + method := &txSelect{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // tx select-ok + //fmt.Println("NextMethod: class:90 method:11") + method := &txSelectOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // tx commit + //fmt.Println("NextMethod: class:90 method:20") + method := &txCommit{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // tx commit-ok + //fmt.Println("NextMethod: class:90 method:21") + method := &txCommitOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // tx rollback + //fmt.Println("NextMethod: class:90 method:30") + method := &txRollback{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // tx rollback-ok + //fmt.Println("NextMethod: class:90 method:31") + method := &txRollbackOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 85: // confirm + switch mf.MethodId { + + case 10: // confirm select + //fmt.Println("NextMethod: class:85 method:10") + method := &confirmSelect{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // confirm select-ok + //fmt.Println("NextMethod: class:85 method:11") + method := &confirmSelectOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/tls_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/tls_test.go new file mode 100644 index 000000000..a0795b641 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/tls_test.go @@ -0,0 +1,218 @@ +package amqp_test + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "github.com/streadway/amqp" + "io" + "net" + "testing" + "time" +) + +type tlsServer struct { + net.Listener + URL string + Config *tls.Config + Header chan []byte +} + +// Captures the header for each accepted connection +func 
(s *tlsServer) Serve() { + for { + c, err := s.Accept() + if err != nil { + return + } + + header := make([]byte, 4) + io.ReadFull(c, header) + s.Header <- header + c.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 0, 0}) + c.Close() + } +} + +func tlsConfig() *tls.Config { + cfg := new(tls.Config) + + cfg.ClientCAs = x509.NewCertPool() + cfg.ClientCAs.AppendCertsFromPEM([]byte(caCert)) + + cert, err := tls.X509KeyPair([]byte(serverCert), []byte(serverKey)) + if err != nil { + panic(err) + } + + cfg.Certificates = append(cfg.Certificates, cert) + cfg.ClientAuth = tls.RequireAndVerifyClientCert + + return cfg +} + +func startTlsServer() tlsServer { + cfg := tlsConfig() + + l, err := tls.Listen("tcp", "127.0.0.1:0", cfg) + if err != nil { + panic(err) + } + + s := tlsServer{ + Listener: l, + Config: cfg, + URL: fmt.Sprintf("amqps://%s/", l.Addr().String()), + Header: make(chan []byte, 1), + } + + go s.Serve() + return s +} + +// Tests that the server has handshaked the connection and seen the client +// protocol announcement. Does not nest that the connection.open is successful. +func TestTLSHandshake(t *testing.T) { + srv := startTlsServer() + defer srv.Close() + + cfg := new(tls.Config) + cfg.RootCAs = x509.NewCertPool() + cfg.RootCAs.AppendCertsFromPEM([]byte(caCert)) + + cert, _ := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) + cfg.Certificates = append(cfg.Certificates, cert) + + _, err := amqp.DialTLS(srv.URL, cfg) + + select { + case <-time.After(10 * time.Millisecond): + t.Fatalf("did not succeed to handshake the TLS connection after 10ms") + case header := <-srv.Header: + if string(header) != "AMQP" { + t.Fatalf("expected to handshake a TLS connection, got err: %v", err) + } + } +} + +const caCert = ` +-----BEGIN CERTIFICATE----- +MIICxjCCAa6gAwIBAgIJANWuMWMQSxvdMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV +BAMTCE15VGVzdENBMB4XDTE0MDEyNzE5NTIyMloXDTI0MDEyNTE5NTIyMlowEzER +MA8GA1UEAxMITXlUZXN0Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDBsIrkW4ob9Z/gzR2/Maa2stbutry6/vvz8eiJwIKIbaHGwqtFOUGiWeKw7H76 +IH3SjTAhNQY2hoKPyH41D36sDJkYBRyHFJTK/6ffvOhpyLnuXJAnoS62eKPSNUAx +5i/lkHj42ESutYAH9qbHCI/gBm9G4WmhGAyA16xzC1n07JObl6KFoY1PqHKl823z +mvF47I24DzemEfjdwC9nAAX/pGYOg9FA9nQv7NnhlsJMxueCx55RNU1ADRoqsbfE +T0CQTOT4ryugGrUp9J4Cwen6YbXZrS6+Kff5SQCAns0Qu8/bwj0DKkuBGLF+Mnwe +mq9bMzyZPUrPM3Gu48ao8YAfAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0P +BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCBwXGblRxIEOlEP6ANZ1C8AHWyG8lR +CQduFclc0tmyCCz5fnyLK0aGu9LhXXe6/HSKqgs4mJqeqYOojdjkfOme/YdwDzjK +WIf0kRYQHcB6NeyEZwW8C7subTP1Xw6zbAmjvQrtCGvRM+fi3/cs1sSSkd/EoRk4 +7GM9qQl/JIIoCOGncninf2NQm5YSpbit6/mOQD7EhqXsw+bX+IRh3DHC1Apv/PoA +HlDNeM4vjWaBxsmvRSndrIvew1czboFM18oRSSIqAkU7dKZ0SbC11grzmNxMG2aD +f9y8FIG6RK/SEaOZuc+uBGXx7tj7dczpE/2puqYcaVGwcv4kkrC/ZuRm +-----END CERTIFICATE----- +` + +const serverCert = ` +-----BEGIN CERTIFICATE----- +MIIC8zCCAdugAwIBAgIBATANBgkqhkiG9w0BAQUFADATMREwDwYDVQQDEwhNeVRl +c3RDQTAeFw0xNDAxMjcxOTUyMjNaFw0yNDAxMjUxOTUyMjNaMCUxEjAQBgNVBAMT +CTEyNy4wLjAuMTEPMA0GA1UEChMGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAxYAKbeGyg0gP0xwVsZsufzk/SUCtD44Gp3lQYQ9QumQ1IVZu +PmZWwPWrzI93a1Abruz6ZhXaB3jcL5QPAy1N44IiFgVN45CZXBsqkpJe/abzRFOV +DRnHxattPDHdgwML5d3nURKGUM/7+ACj5E4pZEDlM3RIjIKVd+doJsL7n6myO8FE +tIpt4vTz1MFp3F+ntPnHU3BZ/VZ1UjSlFWnCjT0CR0tnXsPmlIaC98HThS8x5zNB +fvvSN+Zln8RWdNLnEVHVdqYtOQ828QbCx8s1HfClGgaVoSDrzz+qQgtZFO4wW264 +2CWkNd8DSJUJ/HlPNXmbXsrRMgvGaL7YUz2yRQIDAQABo0AwPjAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIFIDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHREECDAGhwR/ 
+AAABMA0GCSqGSIb3DQEBBQUAA4IBAQAE2g+wAFf9Xg5svcnb7+mfseYV16k9l5WG +onrmR3FLsbTxfbr4PZJMHrswPbi2NRk0+ETPUpcv1RP7pUB7wSEvuS1NPGcU92iP +58ycP3dYtLzmuu6BkgToZqwsCU8fC2zM0wt3+ifzPpDMffWWOioVuA3zdM9WPQYz ++Ofajd0XaZwFZS8uTI5WXgObz7Xqfmln4tF3Sq1CTyuJ44qK4p83XOKFq+L04aD0 +d0c8w3YQNUENny/vMP9mDu3FQ3SnDz2GKl1LSjGe2TUnkoMkDfdk4wSzndTz/ecb +QiCPKijwVPWNOWV3NDE2edMxDPxDoKoEm5F4UGfGjxSRnYCIoZLh +-----END CERTIFICATE----- +` + +const serverKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAxYAKbeGyg0gP0xwVsZsufzk/SUCtD44Gp3lQYQ9QumQ1IVZu +PmZWwPWrzI93a1Abruz6ZhXaB3jcL5QPAy1N44IiFgVN45CZXBsqkpJe/abzRFOV +DRnHxattPDHdgwML5d3nURKGUM/7+ACj5E4pZEDlM3RIjIKVd+doJsL7n6myO8FE +tIpt4vTz1MFp3F+ntPnHU3BZ/VZ1UjSlFWnCjT0CR0tnXsPmlIaC98HThS8x5zNB +fvvSN+Zln8RWdNLnEVHVdqYtOQ828QbCx8s1HfClGgaVoSDrzz+qQgtZFO4wW264 +2CWkNd8DSJUJ/HlPNXmbXsrRMgvGaL7YUz2yRQIDAQABAoIBAGsyEvcPAGg3DbfE +z5WFp9gPx2TIAOanbL8rnlAAEw4H47qDgfTGcSHsdeHioKuTYGMyZrpP8/YISGJe +l0NfLJ5mfH+9Q0hXrJWMfS/u2DYOjo0wXH8u1fpZEEISwqsgVS3fonSjfFmSea1j +E5GQRvEONBkYbWQuYFgjNqmLPS2r5lKbWCQvc1MB/vvVBwOTiO0ON7m/EkM5RKt9 +cDT5ZhhVjBpdmd9HpVbKTdBj8Q0l5/ZHZUEgZA6FDZEwYxTd9l87Z4YT+5SR0z9t +k8/Z0CHd3x3Rv891t7m66ZJkaOda8NC65/432MQEQwJltmrKnc22dS8yI26rrmpp +g3tcbSUCgYEA5nMXdQKS4vF+Kp10l/HqvGz2sU8qQaWYZQIg7Th3QJPo6N52po/s +nn3UF0P5mT1laeZ5ZQJKx4gnmuPnIZ2ZtJQDyFhIbRPcZ+2hSNSuLYVcrumOC3EP +3OZyFtFE1THO73aFe5e1jEdtoOne3Bds/Hq6NF45fkVdL+M9e8pfXIsCgYEA22W8 +zGjbWyrFOYvKknMQVtHnMx8BJEtsvWRknP6CWAv/8WyeZpE128Pve1m441AQnopS +CuOF5wFK0iUXBFbS3Pe1/1j3em6yfVznuUHqJ7Qc+dNzxVvkTK8jGB6x+vm+M9Hg +muHUM726IUxckoSNXbPNAVPIZab1NdSxam7F9m8CgYEAx55QZmIJXJ41XLKxqWC7 +peZ5NpPNlbncrTpPzUzJN94ntXfmrVckbxGt401VayEctMQYyZ9XqUlOjUP3FU5Q +M3S3Zhba/eljVX8o406fZf0MkNLs4QpZ5E6V6x/xEP+pMhKng6yhbVb+JpIPIvUD +yhyBKRWplbB+DRo5Sv685gsCgYA7l5m9h+m1DJv/cnn2Z2yTuHXtC8namuYRV1iA +0ByFX9UINXGc+GpBpCnDPm6ax5+MAJQiQwSW52H0TIDA+/hQbrQvhHHL/o9av8Zt +Kns4h5KrRQUYIUqUjamhnozHV9iS6LnyN87Usv8AlmY6oehoADN53dD702qdUYVT +HH2G3wKBgCdvqyw78FR/n8cUWesTPnxx5HCeWJ1J+2BESnUnPmKZ71CV1H7uweja +vPUxuuuGLKfNx84OKCfRDbtOgMOeyh9T1RmXry6Srz/7/udjlF0qmFiRXfBNAgoR +tNb0+Ri/vY0AHrQ7UnCbl12qPVaqhEXLr+kCGNEPFqpMJPPEeMK0 +-----END RSA PRIVATE KEY----- +` + +const clientCert = ` +-----BEGIN CERTIFICATE----- +MIIC4jCCAcqgAwIBAgIBAjANBgkqhkiG9w0BAQUFADATMREwDwYDVQQDEwhNeVRl +c3RDQTAeFw0xNDAxMjcxOTUyMjNaFw0yNDAxMjUxOTUyMjNaMCUxEjAQBgNVBAMT +CTEyNy4wLjAuMTEPMA0GA1UEChMGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAu7LMqd+agoH168Bsi0WJ36ulYqDypq+GZPF7uWOo2pE0raKH +B++31/hjnkt6yC5kLKVZZ0EfolBa9q4Cy6swfGaEMafy44ZCRneLnt1azL1N6Kfz ++U0KsOqyQDoMxYJG1gVTEZN19/U/ew2eazcxKyERI3oGCQ4SbpkxBTbfxtAFk49e +xIB3obsuMVUrmtXE4FkUkvG7NgpPUgrhp0yxYpj9zruZGzGGT1zNhcarbQ/4i7It +ZMbnv6pqQWtYDgnGX2TDRcEiXGeO+KrzhfpTRLfO3K4np8e8cmTyXM+4lMlWUgma +KrRdu1QXozGqRs47u2prGKGdSQWITpqNVCY8fQIDAQABoy8wLTAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQUF +AAOCAQEAhCuBCLznPc4O96hT3P8Fx19L3ltrWbc/pWrx8JjxUaGk8kNmjMjY+/Mt +JBbjUBx2kJwaY0EHMAfw7D1f1wcCeNycx/0dyb0E6xzhmPw5fY15GGNg8rzWwqSY ++i/1iqU0IRkmRHV7XCF+trd2H0Ec+V1Fd/61E2ccJfOL5aSAyWbMCUtWxS3QMnqH +FBfKdVEiY9WNht5hnvsXQBRaNhowJ6Cwa7/1/LZjmhcXiJ0xrc1Hggj3cvS+4vll +Ew+20a0tPKjD/v/2oSQL+qkeYKV4fhCGkaBHCpPlSJrqorb7B6NmPy3nS26ETKE/ +o2UCfZc5g2MU1ENa31kT1iuhKZapsA== +-----END CERTIFICATE----- +` + +const clientKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAu7LMqd+agoH168Bsi0WJ36ulYqDypq+GZPF7uWOo2pE0raKH +B++31/hjnkt6yC5kLKVZZ0EfolBa9q4Cy6swfGaEMafy44ZCRneLnt1azL1N6Kfz ++U0KsOqyQDoMxYJG1gVTEZN19/U/ew2eazcxKyERI3oGCQ4SbpkxBTbfxtAFk49e 
+xIB3obsuMVUrmtXE4FkUkvG7NgpPUgrhp0yxYpj9zruZGzGGT1zNhcarbQ/4i7It +ZMbnv6pqQWtYDgnGX2TDRcEiXGeO+KrzhfpTRLfO3K4np8e8cmTyXM+4lMlWUgma +KrRdu1QXozGqRs47u2prGKGdSQWITpqNVCY8fQIDAQABAoIBAGSEn3hFyEAmCyYi +2b5IEksXaC2GlgxQKb/7Vs/0oCPU6YonZPsKFMFzQx4tu+ZiecEzF8rlJGTPdbdv +fw3FcuTcHeVd1QSmDO4h7UK5tnu40XVMJKsY6CXQun8M13QajYbmORNLjjypOULU +C0fNueYoAj6mhX7p61MRdSAev/5+0+bVQQG/tSVDQzdngvKpaCunOphiB2VW2Aa0 +7aYPOFCoPB2uo0DwUmBB0yfx9x4hXX9ovQI0YFou7bq6iYJ0vlZBvYQ9YrVdxjKL +avcz1N5xM3WFAkZJSVT/Ho5+uTbZx4RrJ8b5T+t2spOKmXyAjwS2rL/XMAh8YRZ1 +u44duoECgYEA4jpK2qshgQ0t49rjVHEDKX5x7ElEZefl0rHZ/2X/uHUDKpKj2fTq +3TQzHquiQ4Aof7OEB9UE3DGrtpvo/j/PYxL5Luu5VR4AIEJm+CA8GYuE96+uIL0Z +M2r3Lux6Bp30Z47Eit2KiY4fhrWs59WB3NHHoFxgzHSVbnuA02gcX2ECgYEA1GZw +iXIVYaK07ED+q/0ObyS5hD1cMhJ7ifSN9BxuG0qUpSigbkTGj09fUDS4Fqsz9dvz +F0P93fZvyia242TIfDUwJEsDQCgHk7SGa4Rx/p/3x/obIEERk7K76Hdg93U5NXhV +NvczvgL0HYxnb+qtumwMgGPzncB4lGcTnRyOfp0CgYBTIsDnYwRI/KLknUf1fCKB +WSpcfwBXwsS+jQVjygQTsUyclI8KResZp1kx6DkVPT+kzj+y8SF8GfTUgq844BJC +gnJ4P8A3+3JoaH6WqKHtcUxICZOgDF36e1CjOdwOGnX6qIipz4hdzJDhXFpSSDAV +CjKmR8x61k0j8NcC2buzgQKBgFr7eo9VwBTvpoJhIPY5UvqHB7S+uAR26FZi3H/J +wdyM6PmKWpaBfXCb9l8cBhMnyP0y94FqzY9L5fz48nSbkkmqWvHg9AaCXySFOuNJ +e68vhOszlnUNimLzOAzPPkkh/JyL7Cy8XXyyNTGHGDPXmg12BTDmH8/eR4iCUuOE +/QD9AoGBALQ/SkvfO3D5+k9e/aTHRuMJ0+PWdLUMTZ39oJQxUx+qj7/xpjDvWTBn +eDmF/wjnIAg+020oXyBYo6plEZfDz3EYJQZ+3kLLEU+O/A7VxCakPYPwCr7N/InL +Ccg/TVSIXxw/6uJnojoAjMIEU45NoP6RMp0mWYYb2OlteEv08Ovp +-----END RSA PRIVATE KEY----- +` diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/types.go b/services/templeton/vendor/src/github.com/streadway/amqp/types.go new file mode 100644 index 000000000..8071bf7cd --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/types.go @@ -0,0 +1,390 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" + "io" + "time" +) + +var ( + // Errors that this library could return/emit from a channel or connection + ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"} + ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"} + ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"} + ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"} + ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"} + ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"} + ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"} + ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"} + ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"} + ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"} +) + +// Error captures the code and reason a channel or connection has been closed +// by the server. 
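+//
+// An illustrative (non-normative) sketch of inspecting such an error from
+// application code, assuming ch is an open *Channel:
+//
+//   if err := ch.Close(); err != nil {
+//       if amqpErr, ok := err.(*Error); ok && amqpErr.Recover {
+//           // a soft error: the operation may be retried on a new channel
+//       }
+//   }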
+type Error struct {
+ Code int // constant code from the specification
+ Reason string // description of the error
+ Server bool // true when initiated from the server, false when from this library
+ Recover bool // true when this error can be recovered by retrying later or with different parameters
+}
+
+func newError(code uint16, text string) *Error {
+ return &Error{
+ Code: int(code),
+ Reason: text,
+ Recover: isSoftExceptionCode(int(code)),
+ Server: true,
+ }
+}
+
+func (me Error) Error() string {
+ return fmt.Sprintf("Exception (%d) Reason: %q", me.Code, me.Reason)
+}
+
+// Used by header frames to capture routing and header information
+type properties struct {
+ ContentType string // MIME content type
+ ContentEncoding string // MIME content encoding
+ Headers Table // Application or header exchange table
+ DeliveryMode uint8 // queue implementation use - Transient (1) or Persistent (2)
+ Priority uint8 // queue implementation use - 0 to 9
+ CorrelationId string // application use - correlation identifier
+ ReplyTo string // application use - address to reply to (ex: RPC)
+ Expiration string // implementation use - message expiration spec
+ MessageId string // application use - message identifier
+ Timestamp time.Time // application use - message timestamp
+ Type string // application use - message type name
+ UserId string // application use - creating user id
+ AppId string // application use - creating application
+ reserved1 string // was cluster-id - process for buffer consumption
+}
+
+// DeliveryMode. Transient means higher throughput but messages will not be
+// restored on broker restart. The delivery mode of publishings is unrelated
+// to the durability of the queues they reside on. Transient messages will
+// not be restored to durable queues, persistent messages will be restored to
+// durable queues and lost on non-durable queues during server restart.
+//
+// This remains typed as uint8 to match Publishing.DeliveryMode. Other
+// delivery modes specific to custom queue implementations are not enumerated
+// here.
+const (
+ Transient uint8 = 1
+ Persistent uint8 = 2
+)
+
+// The property flags are an array of bits that indicate the presence or
+// absence of each property value in sequence. The bits are ordered from
+// high to low - bit 15 indicates the first property.
+const (
+ flagContentType = 0x8000
+ flagContentEncoding = 0x4000
+ flagHeaders = 0x2000
+ flagDeliveryMode = 0x1000
+ flagPriority = 0x0800
+ flagCorrelationId = 0x0400
+ flagReplyTo = 0x0200
+ flagExpiration = 0x0100
+ flagMessageId = 0x0080
+ flagTimestamp = 0x0040
+ flagType = 0x0020
+ flagUserId = 0x0010
+ flagAppId = 0x0008
+ flagReserved1 = 0x0004
+)
+
+// Queue captures the current server state of the queue on the server returned
+// from Channel.QueueDeclare or Channel.QueueInspect.
+type Queue struct {
+ Name string // server confirmed or generated name
+ Messages int // count of messages not awaiting acknowledgment
+ Consumers int // number of consumers receiving deliveries
+}
+
+// Publishing captures the client message sent to the server. The fields
+// outside of the Headers table included in this struct mirror the underlying
+// fields in the content frame. They use native types for convenience and
+// efficiency.
+type Publishing struct {
+ // Application or exchange specific fields,
+ // the headers exchange will inspect this field.
+ Headers Table
+
+ // Properties
+ ContentType string // MIME content type
+ ContentEncoding string // MIME content encoding
+ DeliveryMode uint8 // Transient (0 or 1) or Persistent (2)
+ Priority uint8 // 0 to 9
+ CorrelationId string // correlation identifier
+ ReplyTo string // address to reply to (ex: RPC)
+ Expiration string // message expiration spec
+ MessageId string // message identifier
+ Timestamp time.Time // message timestamp
+ Type string // message type name
+ UserId string // creating user id - ex: "guest"
+ AppId string // creating application id
+
+ // The application specific payload of the message
+ Body []byte
+}
+
+// Blocking notifies the server's TCP flow control of the Connection. When a
+// server hits a memory or disk alarm it will block all connections until the
+// resources are reclaimed. Use NotifyBlock on the Connection to receive these
+// events.
+type Blocking struct {
+ Active bool // TCP pushback active/inactive on server
+ Reason string // Server reason for activation
+}
+
+// Confirmation notifies the acknowledgment or negative acknowledgment of a
+// publishing identified by its delivery tag. Use NotifyPublish on the Channel
+// to consume these events.
+type Confirmation struct {
+ DeliveryTag uint64 // A 1-based counter of publishings from when the channel was put in Confirm mode
+ Ack bool // True when the server successfully received the publishing
+}
+
+// Decimal matches the AMQP decimal type. Scale is the number of decimal
+// digits; for example, Scale == 2 and Value == 12345 represent the decimal
+// 123.45.
+type Decimal struct {
+ Scale uint8
+ Value int32
+}
+
+// Table stores user supplied fields of the following types:
+//
+// bool
+// byte
+// float32
+// float64
+// int16
+// int32
+// int64
+// nil
+// string
+// time.Time
+// amqp.Decimal
+// amqp.Table
+// []byte
+// []interface{} - containing above types
+//
+// Functions taking a table will immediately fail when the table contains a
+// value of an unsupported type.
+//
+// The caller must be specific in which precision of integer it wishes to
+// encode.
+//
+// Use a type assertion when reading values from a table for type conversion.
+//
+// RabbitMQ expects int32 for integer values.
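+//
+// A hypothetical example of a valid table, using explicit integer widths
+// (the field names here are illustrative, not from the upstream docs):
+//
+//   headers := amqp.Table{
+//       "x-match": "all",
+//       "retries": int32(3),
+//       "backoff": amqp.Table{"min-ms": int64(10)},
+//   }
+//
+//   if err := headers.Validate(); err != nil {
+//       // a value of an unsupported type (e.g. uint) would be reported here
+//   }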
+//
+type Table map[string]interface{}
+
+func validateField(f interface{}) error {
+ switch fv := f.(type) {
+ case nil, bool, byte, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time:
+ return nil
+
+ case []interface{}:
+ for _, v := range fv {
+ if err := validateField(v); err != nil {
+ return fmt.Errorf("in array %s", err)
+ }
+ }
+ return nil
+
+ case Table:
+ for k, v := range fv {
+ if err := validateField(v); err != nil {
+ return fmt.Errorf("table field %q %s", k, err)
+ }
+ }
+ return nil
+ }
+
+ return fmt.Errorf("value %T not supported", f)
+}
+
+func (t Table) Validate() error {
+ return validateField(t)
+}
+
+// Heap interface for maintaining delivery tags
+type tagSet []uint64
+
+func (me tagSet) Len() int { return len(me) }
+func (me tagSet) Less(i, j int) bool { return (me)[i] < (me)[j] }
+func (me tagSet) Swap(i, j int) { (me)[i], (me)[j] = (me)[j], (me)[i] }
+func (me *tagSet) Push(tag interface{}) { *me = append(*me, tag.(uint64)) }
+func (me *tagSet) Pop() interface{} {
+ val := (*me)[len(*me)-1]
+ *me = (*me)[:len(*me)-1]
+ return val
+}
+
+type message interface {
+ id() (uint16, uint16)
+ wait() bool
+ read(io.Reader) error
+ write(io.Writer) error
+}
+
+type messageWithContent interface {
+ message
+ getContent() (properties, []byte)
+ setContent(properties, []byte)
+}
+
+/*
+The base frame interface, implemented as described in:
+
+2.3.5 Frame Details
+
+All frames consist of a header (7 octets), a payload of arbitrary size, and a
+'frame-end' octet that detects malformed frames:
+
+  0      1         3             7                  size+7 size+8
+  +------+---------+-------------+  +------------+  +-----------+
+  | type | channel |     size    |  |  payload   |  | frame-end |
+  +------+---------+-------------+  +------------+  +-----------+
+   octet   short         long         size octets       octet
+
+To read a frame, we:
+
+ 1. Read the header and check the frame type and channel.
+ 2. Depending on the frame type, we read the payload and process it.
+ 3. Read the frame end octet.
+
+In realistic implementations where performance is a concern, we would use
+“read-ahead buffering” or “gathering reads” to avoid doing three separate
+system calls to read a frame.
+
+*/
+type frame interface {
+ write(io.Writer) error
+ channel() uint16
+}
+
+type reader struct {
+ r io.Reader
+}
+
+type writer struct {
+ w io.Writer
+}
+
+// Implements the frame interface for Connection RPC
+type protocolHeader struct{}
+
+func (protocolHeader) write(w io.Writer) error {
+ _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1})
+ return err
+}
+
+func (protocolHeader) channel() uint16 {
+ panic("only valid as initial handshake")
+}
+
+/*
+Method frames carry the high-level protocol commands (which we call "methods").
+One method frame carries one command. The method frame payload has this format:
+
+  0          2           4
+  +----------+-----------+-------------- - -
+  | class-id | method-id | arguments...
+  +----------+-----------+-------------- - -
+     short      short    ...
+
+To process a method frame, we:
+
+ 1. Read the method frame payload.
+ 2. Unpack it into a structure. A given method always has the same structure,
+ so we can unpack the method rapidly.
+ 3. Check that the method is allowed in the current context.
+ 4. Check that the method arguments are valid.
+ 5. Execute the method.
+
+Method frame bodies are constructed as a list of AMQP data fields (bits,
+integers, strings and string tables). The marshalling code is trivially
+generated directly from the protocol specifications, and can be very rapid.
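+
+As a worked example (illustrative, derived from the marshalling code earlier
+in this file rather than quoted from the spec): a basic.ack for delivery tag
+1 with multiple=false is carried as class-id 0x003C (60) and method-id
+0x0050 (80), followed by the arguments: an 8-octet delivery-tag of
+0x0000000000000001 and a single flag octet 0x00.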
+*/ +type methodFrame struct { + ChannelId uint16 + ClassId uint16 + MethodId uint16 + Method message +} + +func (me *methodFrame) channel() uint16 { return me.ChannelId } + +/* +Heartbeating is a technique designed to undo one of TCP/IP's features, namely +its ability to recover from a broken physical connection by closing only after +a quite long time-out. In some scenarios we need to know very rapidly if a +peer is disconnected or not responding for other reasons (e.g. it is looping). +Since heartbeating can be done at a low level, we implement this as a special +type of frame that peers exchange at the transport level, rather than as a +class method. +*/ +type heartbeatFrame struct { + ChannelId uint16 +} + +func (me *heartbeatFrame) channel() uint16 { return me.ChannelId } + +/* +Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally +defined as carrying content. When a peer sends such a method frame, it always +follows it with a content header and zero or more content body frames. + +A content header frame has this format: + + 0 2 4 12 14 + +----------+--------+-----------+----------------+------------- - - + | class-id | weight | body size | property flags | property list... + +----------+--------+-----------+----------------+------------- - - + short short long long short remainder... + +We place content body in distinct frames (rather than including it in the +method) so that AMQP may support "zero copy" techniques in which content is +never marshalled or encoded. We place the content properties in their own +frame so that recipients can selectively discard contents they do not want to +process +*/ +type headerFrame struct { + ChannelId uint16 + ClassId uint16 + weight uint16 + Size uint64 + Properties properties +} + +func (me *headerFrame) channel() uint16 { return me.ChannelId } + +/* +Content is the application data we carry from client-to-client via the AMQP +server. Content is, roughly speaking, a set of properties plus a binary data +part. The set of allowed properties are defined by the Basic class, and these +form the "content header frame". The data can be any size, and MAY be broken +into several (or many) chunks, each forming a "content body frame". + +Looking at the frames for a specific channel, as they pass on the wire, we +might see something like this: + + [method] + [method] [header] [body] [body] + [method] + ... +*/ +type bodyFrame struct { + ChannelId uint16 + Body []byte +} + +func (me *bodyFrame) channel() uint16 { return me.ChannelId } diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/uri.go b/services/templeton/vendor/src/github.com/streadway/amqp/uri.go new file mode 100644 index 000000000..582464db5 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/uri.go @@ -0,0 +1,170 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "fmt" + "net/url" + "strconv" + "strings" +) + +var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'") + +var schemePorts = map[string]int{ + "amqp": 5672, + "amqps": 5671, +} + +var defaultURI = URI{ + Scheme: "amqp", + Host: "localhost", + Port: 5672, + Username: "guest", + Password: "guest", + Vhost: "/", +} + +// URI represents a parsed AMQP URI string. 
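+//
+// For example (values taken from the parser's test matrix below), the input
+// "amqp://user:pass@host:10000/vhost" yields Scheme "amqp", Host "host",
+// Port 10000, Username "user", Password "pass" and Vhost "vhost"; omitted
+// components fall back to the defaults documented on ParseURI.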
+type URI struct { + Scheme string + Host string + Port int + Username string + Password string + Vhost string +} + +// ParseURI attempts to parse the given AMQP URI according to the spec. +// See http://www.rabbitmq.com/uri-spec.html. +// +// Default values for the fields are: +// +// Scheme: amqp +// Host: localhost +// Port: 5672 +// Username: guest +// Password: guest +// Vhost: / +// +func ParseURI(uri string) (URI, error) { + me := defaultURI + + u, err := url.Parse(uri) + if err != nil { + return me, err + } + + defaultPort, okScheme := schemePorts[u.Scheme] + + if okScheme { + me.Scheme = u.Scheme + } else { + return me, errURIScheme + } + + host, port := splitHostPort(u.Host) + + if host != "" { + me.Host = host + } + + if port != "" { + port32, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return me, err + } + me.Port = int(port32) + } else { + me.Port = defaultPort + } + + if u.User != nil { + me.Username = u.User.Username() + if password, ok := u.User.Password(); ok { + me.Password = password + } + } + + if u.Path != "" { + if strings.HasPrefix(u.Path, "/") { + if u.Host == "" && strings.HasPrefix(u.Path, "///") { + // net/url doesn't handle local context authorities and leaves that up + // to the scheme handler. In our case, we translate amqp:/// into the + // default host and whatever the vhost should be + if len(u.Path) > 3 { + me.Vhost = u.Path[3:] + } + } else if len(u.Path) > 1 { + me.Vhost = u.Path[1:] + } + } else { + me.Vhost = u.Path + } + } + + return me, nil +} + +// Splits host:port, host, [ho:st]:port, or [ho:st]. Unlike net.SplitHostPort +// which splits :port, host:port or [host]:port +// +// Handles hosts that have colons that are in brackets like [::1]:http +func splitHostPort(addr string) (host, port string) { + i := strings.LastIndex(addr, ":") + + if i >= 0 { + host, port = addr[:i], addr[i+1:] + + if len(port) > 0 && port[len(port)-1] == ']' && addr[0] == '[' { + // we've split on an inner colon, the port was missing outside of the + // brackets so use the full addr. We could assert that host should not + // contain any colons here + host, port = addr, "" + } + } else { + host = addr + } + + return +} + +// PlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. 
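+//
+// A minimal usage sketch (assuming the URI parses without error):
+//
+//   u, _ := ParseURI("amqp://user:pass@host:10000/vhost")
+//   auth := u.PlainAuth() // &PlainAuth{Username: "user", Password: "pass"}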
+func (me URI) PlainAuth() *PlainAuth { + return &PlainAuth{ + Username: me.Username, + Password: me.Password, + } +} + +func (me URI) String() string { + var authority string + + if me.Username != defaultURI.Username || me.Password != defaultURI.Password { + authority += me.Username + + if me.Password != defaultURI.Password { + authority += ":" + me.Password + } + + authority += "@" + } + + authority += me.Host + + if defaultPort, found := schemePorts[me.Scheme]; !found || defaultPort != me.Port { + authority += ":" + strconv.FormatInt(int64(me.Port), 10) + } + + var vhost string + if me.Vhost != defaultURI.Vhost { + vhost = me.Vhost + } + + return fmt.Sprintf("%s://%s/%s", me.Scheme, authority, url.QueryEscape(vhost)) +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/uri_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/uri_test.go new file mode 100644 index 000000000..5d93e0bc7 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/uri_test.go @@ -0,0 +1,328 @@ +package amqp + +import ( + "testing" +) + +// Test matrix defined on http://www.rabbitmq.com/uri-spec.html +type testURI struct { + url string + username string + password string + host string + port int + vhost string + canon string +} + +var uriTests = []testURI{ + { + url: "amqp://user:pass@host:10000/vhost", + username: "user", + password: "pass", + host: "host", + port: 10000, + vhost: "vhost", + canon: "amqp://user:pass@host:10000/vhost", + }, + + // this fails due to net/url not parsing pct-encoding in host + // testURI{url: "amqp://user%61:%61pass@ho%61st:10000/v%2Fhost", + // username: "usera", + // password: "apass", + // host: "hoast", + // port: 10000, + // vhost: "v/host", + // }, + + { + url: "amqp://", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://localhost/", + }, + + { + url: "amqp://:@/", + username: "", + password: "", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://:@localhost/", + }, + + { + url: "amqp://user@", + username: "user", + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://user@localhost/", + }, + + { + url: "amqp://user:pass@", + username: "user", + password: "pass", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://user:pass@localhost/", + }, + + { + url: "amqp://guest:pass@", + username: "guest", + password: "pass", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://guest:pass@localhost/", + }, + + { + url: "amqp://host", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://host/", + }, + + { + url: "amqp://:10000", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: 10000, + vhost: defaultURI.Vhost, + canon: "amqp://localhost:10000/", + }, + + { + url: "amqp:///vhost", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: "vhost", + canon: "amqp://localhost/vhost", + }, + + { + url: "amqp://host/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://host/", + }, + + { + url: "amqp://host/%2F", + username: 
defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "/", + canon: "amqp://host/", + }, + + { + url: "amqp://host/%2F%2F", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "//", + canon: "amqp://host/%2F%2F", + }, + + { + url: "amqp://host/%2Fslash%2F", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "/slash/", + canon: "amqp://host/%2Fslash%2F", + }, + + { + url: "amqp://192.168.1.1:1000/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "192.168.1.1", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqp://192.168.1.1:1000/", + }, + + { + url: "amqp://[::1]", + username: defaultURI.Username, + password: defaultURI.Password, + host: "[::1]", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://[::1]/", + }, + + { + url: "amqp://[::1]:1000", + username: defaultURI.Username, + password: defaultURI.Password, + host: "[::1]", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqp://[::1]:1000/", + }, + + { + url: "amqps:///", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: schemePorts["amqps"], + vhost: defaultURI.Vhost, + canon: "amqps://localhost/", + }, + + { + url: "amqps://host:1000/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqps://host:1000/", + }, +} + +func TestURISpec(t *testing.T) { + for _, test := range uriTests { + u, err := ParseURI(test.url) + if err != nil { + t.Fatal("Could not parse spec URI: ", test.url, " err: ", err) + } + + if test.username != u.Username { + t.Error("For: ", test.url, " usernames do not match. want: ", test.username, " got: ", u.Username) + } + + if test.password != u.Password { + t.Error("For: ", test.url, " passwords do not match. want: ", test.password, " got: ", u.Password) + } + + if test.host != u.Host { + t.Error("For: ", test.url, " hosts do not match. want: ", test.host, " got: ", u.Host) + } + + if test.port != u.Port { + t.Error("For: ", test.url, " ports do not match. want: ", test.port, " got: ", u.Port) + } + + if test.vhost != u.Vhost { + t.Error("For: ", test.url, " vhosts do not match. want: ", test.vhost, " got: ", u.Vhost) + } + + if test.canon != u.String() { + t.Error("For: ", test.url, " canonical string does not match. 
want: ", test.canon, " got: ", u.String()) + } + } +} + +func TestURIUnknownScheme(t *testing.T) { + if _, err := ParseURI("http://example.com/"); err == nil { + t.Fatal("Expected error when parsing non-amqp scheme") + } +} + +func TestURIScheme(t *testing.T) { + if _, err := ParseURI("amqp://example.com/"); err != nil { + t.Fatalf("Expected to parse amqp scheme, got %v", err) + } + + if _, err := ParseURI("amqps://example.com/"); err != nil { + t.Fatalf("Expected to parse amqps scheme, got %v", err) + } +} + +func TestURIDefaults(t *testing.T) { + url := "amqp://" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqp://localhost/" { + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIComplete(t *testing.T) { + url := "amqp://bob:dobbs@foo.bar:5678/private" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != url { + t.Fatal("Defaults not encoded properly want:", url, " got:", uri.String()) + } +} + +func TestURIDefaultPortAmqpNotIncluded(t *testing.T) { + url := "amqp://foo.bar:5672/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqp://foo.bar/" { + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIDefaultPortAmqp(t *testing.T) { + url := "amqp://foo.bar/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.Port != 5672 { + t.Fatal("Default port not correct for amqp, got:", uri.Port) + } +} + +func TestURIDefaultPortAmqpsNotIncludedInString(t *testing.T) { + url := "amqps://foo.bar:5671/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqps://foo.bar/" { + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIDefaultPortAmqps(t *testing.T) { + url := "amqps://foo.bar/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.Port != 5671 { + t.Fatal("Default port not correct for amqps, got:", uri.Port) + } +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/write.go b/services/templeton/vendor/src/github.com/streadway/amqp/write.go new file mode 100644 index 000000000..d392ca237 --- /dev/null +++ b/services/templeton/vendor/src/github.com/streadway/amqp/write.go @@ -0,0 +1,411 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "math" + "time" +) + +func (me *writer) WriteFrame(frame frame) (err error) { + if err = frame.write(me.w); err != nil { + return + } + + if buf, ok := me.w.(*bufio.Writer); ok { + err = buf.Flush() + } + + return +} + +func (me *methodFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + + if me.Method == nil { + return errors.New("malformed frame: missing method") + } + + class, method := me.Method.id() + + if err = binary.Write(&payload, binary.BigEndian, class); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, method); err != nil { + return + } + + if err = me.Method.write(&payload); err != nil { + return + } + + return writeFrame(w, frameMethod, me.ChannelId, payload.Bytes()) +} + +// Heartbeat +// +// Payload is empty +func (me *heartbeatFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameHeartbeat, me.ChannelId, []byte{}) +} + +// CONTENT HEADER +// 0 2 4 12 14 +// +----------+--------+-----------+----------------+------------- - - +// | class-id | weight | body size | property flags | property list... +// +----------+--------+-----------+----------------+------------- - - +// short short long long short remainder... +// +func (me *headerFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + var zeroTime time.Time + + if err = binary.Write(&payload, binary.BigEndian, me.ClassId); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, me.weight); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, me.Size); err != nil { + return + } + + // First pass will build the mask to be serialized, second pass will serialize + // each of the fields that appear in the mask. 
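+ //
+ // For example (illustrative): a publishing that sets only ContentType and
+ // DeliveryMode would serialize mask = flagContentType|flagDeliveryMode
+ // (0x9000), followed by just those two property values in flag order.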
+
+ var mask uint16
+
+ if len(me.Properties.ContentType) > 0 {
+ mask = mask | flagContentType
+ }
+ if len(me.Properties.ContentEncoding) > 0 {
+ mask = mask | flagContentEncoding
+ }
+ if me.Properties.Headers != nil && len(me.Properties.Headers) > 0 {
+ mask = mask | flagHeaders
+ }
+ if me.Properties.DeliveryMode > 0 {
+ mask = mask | flagDeliveryMode
+ }
+ if me.Properties.Priority > 0 {
+ mask = mask | flagPriority
+ }
+ if len(me.Properties.CorrelationId) > 0 {
+ mask = mask | flagCorrelationId
+ }
+ if len(me.Properties.ReplyTo) > 0 {
+ mask = mask | flagReplyTo
+ }
+ if len(me.Properties.Expiration) > 0 {
+ mask = mask | flagExpiration
+ }
+ if len(me.Properties.MessageId) > 0 {
+ mask = mask | flagMessageId
+ }
+ if me.Properties.Timestamp != zeroTime {
+ mask = mask | flagTimestamp
+ }
+ if len(me.Properties.Type) > 0 {
+ mask = mask | flagType
+ }
+ if len(me.Properties.UserId) > 0 {
+ mask = mask | flagUserId
+ }
+ if len(me.Properties.AppId) > 0 {
+ mask = mask | flagAppId
+ }
+
+ if err = binary.Write(&payload, binary.BigEndian, mask); err != nil {
+ return
+ }
+
+ if hasProperty(mask, flagContentType) {
+ if err = writeShortstr(&payload, me.Properties.ContentType); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagContentEncoding) {
+ if err = writeShortstr(&payload, me.Properties.ContentEncoding); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagHeaders) {
+ if err = writeTable(&payload, me.Properties.Headers); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagDeliveryMode) {
+ if err = binary.Write(&payload, binary.BigEndian, me.Properties.DeliveryMode); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagPriority) {
+ if err = binary.Write(&payload, binary.BigEndian, me.Properties.Priority); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagCorrelationId) {
+ if err = writeShortstr(&payload, me.Properties.CorrelationId); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagReplyTo) {
+ if err = writeShortstr(&payload, me.Properties.ReplyTo); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagExpiration) {
+ if err = writeShortstr(&payload, me.Properties.Expiration); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagMessageId) {
+ if err = writeShortstr(&payload, me.Properties.MessageId); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagTimestamp) {
+ if err = binary.Write(&payload, binary.BigEndian, uint64(me.Properties.Timestamp.Unix())); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagType) {
+ if err = writeShortstr(&payload, me.Properties.Type); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagUserId) {
+ if err = writeShortstr(&payload, me.Properties.UserId); err != nil {
+ return
+ }
+ }
+ if hasProperty(mask, flagAppId) {
+ if err = writeShortstr(&payload, me.Properties.AppId); err != nil {
+ return
+ }
+ }
+
+ return writeFrame(w, frameHeader, me.ChannelId, payload.Bytes())
+}
+
+// Body
+//
+// Payload is one byte range from the full body whose size is declared in the
+// Header frame
+func (me *bodyFrame) write(w io.Writer) (err error) {
+ return writeFrame(w, frameBody, me.ChannelId, me.Body)
+}
+
+func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) {
+ end := []byte{frameEnd}
+ size := uint(len(payload))
+
+ _, err = w.Write([]byte{
+ byte(typ),
+ byte((channel & 0xff00) >> 8),
+ byte((channel & 0x00ff) >> 0),
+ byte((size & 0xff000000) >> 24),
+ byte((size & 0x00ff0000) >> 16),
+ byte((size & 0x0000ff00) >> 8),
+ byte((size & 0x000000ff) >> 0),
+ })
+
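+ // The seven octets above form the general frame header: one type octet,
+ // a big-endian uint16 channel id, and a big-endian uint32 payload size.
+ // As an illustrative example, a method frame (type 1 in AMQP 0-9-1) on
+ // channel 1 with a 5-octet payload begins 0x01 0x00 0x01 0x00 0x00 0x00 0x05.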
+ if err != nil { + return + } + + if _, err = w.Write(payload); err != nil { + return + } + + if _, err = w.Write(end); err != nil { + return + } + + return +} + +func writeShortstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length uint8 = uint8(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +func writeLongstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length uint32 = uint32(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func writeField(w io.Writer, value interface{}) (err error) { + var buf [9]byte + var enc []byte + + switch v := value.(type) { + case bool: + buf[0] = 't' + if v { + buf[1] = byte(1) + } else { + buf[1] = byte(0) + } + enc = buf[:2] + + case byte: + buf[0] = 'b' + buf[1] = byte(v) + enc = buf[:2] + + case int16: + buf[0] = 's' + binary.BigEndian.PutUint16(buf[1:3], uint16(v)) + enc = buf[:3] + + case int32: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int64: + buf[0] = 'l' + binary.BigEndian.PutUint64(buf[1:9], uint64(v)) + enc = buf[:9] + + case float32: + buf[0] = 'f' + binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v)) + enc = buf[:5] + + case float64: + buf[0] = 'd' + binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v)) + enc = buf[:9] + + case Decimal: + buf[0] = 'D' + buf[1] = byte(v.Scale) + binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value)) + enc = buf[:6] + + case string: + buf[0] = 'S' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + enc = append(buf[:5], []byte(v)...) + + case []interface{}: // field-array + buf[0] = 'A' + + sec := new(bytes.Buffer) + for _, val := range v { + if err = writeField(sec, val); err != nil { + return + } + } + + binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + if _, err = w.Write(sec.Bytes()); err != nil { + return + } + + return + + case time.Time: + buf[0] = 'T' + binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix())) + enc = buf[:9] + + case Table: + if _, err = w.Write([]byte{'F'}); err != nil { + return + } + return writeTable(w, v) + + case []byte: + buf[0] = 'x' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + if _, err = w.Write(buf[0:5]); err != nil { + return + } + if _, err = w.Write(v); err != nil { + return + } + return + + case nil: + buf[0] = 'V' + enc = buf[:1] + + default: + return ErrFieldType + } + + _, err = w.Write(enc) + + return +} + +func writeTable(w io.Writer, table Table) (err error) { + var buf bytes.Buffer + + for key, val := range table { + if err = writeShortstr(&buf, key); err != nil { + return + } + if err = writeField(&buf, val); err != nil { + return + } + } + + return writeLongstr(w, string(buf.Bytes())) +} From 57d7ff2930a2a6fea0dfbfd71287cbd18910c2cd Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Tue, 1 Mar 2016 17:43:43 -0700 Subject: [PATCH 098/183] Modify indexing to put into multiple metadata types in the index, one per target_type. 
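
With this change, a metadata document whose target_type is "file" is indexed
under the "file_metadata" type (illustrative call, mirroring the diff below):

    elastic.NewBulkIndexRequest().Index(e.index).Type("file_metadata").Parent(formatted.ID).Id(formatted.ID).Doc(formatted)

and PurgeIndex now walks the "file_metadata" and "folder_metadata" types
separately.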
--- .../templeton/elasticsearch/elasticsearch.go | 49 ++++++++++++------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/services/templeton/src/templeton/elasticsearch/elasticsearch.go b/services/templeton/src/templeton/elasticsearch/elasticsearch.go index 1598d3de5..1a6232a4f 100644 --- a/services/templeton/src/templeton/elasticsearch/elasticsearch.go +++ b/services/templeton/src/templeton/elasticsearch/elasticsearch.go @@ -1,6 +1,7 @@ package elasticsearch import ( + "fmt" "logcabin" "gopkg.in/olivere/elastic.v3" @@ -68,46 +69,59 @@ func (b *BulkIndexer) Flush() error { return nil } -// PurgeIndex walks an index querying a database, deleting those which should not exist -func (e *Elasticer) PurgeIndex(d *database.Databaser) { - indexer := e.NewBulkIndexer(1000) - defer indexer.Flush() - - scanner, err := e.es.Scan(e.index).Type("metadata").Scroll("1m").Fields("_id").Do() +func (e *Elasticer) PurgeType(d *database.Databaser, indexer *BulkIndexer, t string) error { + scanner, err := e.es.Scan(e.index).Type(t).Scroll("1m").Fields("_id").Do() if err != nil { - logger.Fatal(err) - return + return err } for { docs, err := scanner.Next() if err == elastic.EOS { - logger.Print("Finished all rows for purge.") + logger.Printf("Finished all rows for purge of %s.", t) break } if err != nil { - logger.Print(err) - break + return err } if docs.TotalHits() > 0 { for _, hit := range docs.Hits.Hits { avus, err := d.GetObjectAVUs(hit.Id) if err != nil { - logger.Printf("Error processing %s: %s", hit.Id, err) + logger.Printf("Error processing %s/%s: %s", t, hit.Id, err) continue } if len(avus) == 0 { - logger.Printf("Deleting %s", hit.Id) - req := elastic.NewBulkDeleteRequest().Index(e.index).Type("metadata").Id(hit.Id) + logger.Printf("Deleting %s/%s", t, hit.Id) + req := elastic.NewBulkDeleteRequest().Index(e.index).Type(t).Routing(hit.Id).Id(hit.Id) err = indexer.Add(req) if err != nil { - logger.Printf("Error enqueuing delete of %s: %s", hit.Id, err) + logger.Printf("Error enqueuing delete of %s/%s: %s", t, hit.Id, err) } } } } } + return nil +} + +// PurgeIndex walks an index querying a database, deleting those which should not exist +func (e *Elasticer) PurgeIndex(d *database.Databaser) { + indexer := e.NewBulkIndexer(1000) + defer indexer.Flush() + + err := e.PurgeType(d, indexer, "file_metadata") + if err != nil { + logger.Fatal(err) + return + } + + err = e.PurgeType(d, indexer, "folder_metadata") + if err != nil { + logger.Fatal(err) + return + } } // IndexEverything creates a bulk indexer and takes a database, and iterates to index its contents @@ -137,9 +151,10 @@ func (e *Elasticer) IndexEverything(d *database.Databaser) { logger.Print(err) break } - logger.Printf("Indexing %s", formatted.ID) + indexed_type := fmt.Sprintf("%s_metadata", ids[0].TargetType) + logger.Printf("Indexing %s/%s", indexed_type, formatted.ID) - req := elastic.NewBulkIndexRequest().Index(e.index).Type("metadata").Id(formatted.ID).Doc(formatted) + req := elastic.NewBulkIndexRequest().Index(e.index).Type(indexed_type).Parent(formatted.ID).Id(formatted.ID).Doc(formatted) err = indexer.Add(req) if err != nil { logger.Print(err) From 67ff2ff4f0e26d1b934e92cefb48fe3e7423ef0b Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 3 Mar 2016 14:21:19 -0700 Subject: [PATCH 099/183] CORE-7496: Add --periodic mode to templeton --- services/templeton/src/messaging/amqp.go | 45 +++++++++++++++--------- services/templeton/src/templeton/main.go | 32 +++++++++++++++-- 2 files changed, 59 insertions(+), 18 deletions(-) diff 
--git a/services/templeton/src/messaging/amqp.go b/services/templeton/src/messaging/amqp.go index 504a1898e..a45dd1985 100644 --- a/services/templeton/src/messaging/amqp.go +++ b/services/templeton/src/messaging/amqp.go @@ -11,6 +11,17 @@ import ( var logger = logcabin.New() +var ( + //ReindexExchange is the name of the exchange that full-reindex info is passed around on. + ReindexExchange = "de" + + //ReindexAllKey is the routing/binding key for full reindex messages. + ReindexAllKey = "index.all" + + //ReindexTemplatesKey is the routing/binding key for templates reindex messages. + ReindexTemplatesKey = "index.templates" +) + // MessageHandler defines a type for amqp.Delivery handlers. type MessageHandler func(amqp.Delivery) @@ -20,10 +31,11 @@ type aggregationMessage struct { } type consumer struct { - exchange string - queue string - key string - handler MessageHandler + exchange string + exchangeType string + queue string + key string + handler MessageHandler } type consumeradder struct { @@ -134,12 +146,13 @@ func (c *Client) Close() { // each time the client is set up. Note that this just adds the consumers to a // list, it doesn't actually start handling messages yet. You need to call // Listen() for that. -func (c *Client) AddConsumer(exchange, queue, key string, handler MessageHandler) { +func (c *Client) AddConsumer(exchange, exchangeType, queue, key string, handler MessageHandler) { cs := consumer{ - exchange: exchange, - queue: queue, - key: key, - handler: handler, + exchange: exchange, + exchangeType: exchangeType, + queue: queue, + key: key, + handler: handler, } adder := consumeradder{ consumer: cs, @@ -155,13 +168,13 @@ func (c *Client) initconsumer(cs *consumer) error { return err } err = channel.ExchangeDeclare( - cs.exchange, //name - "topic", //kind - true, //durable - false, //auto-delete - false, //internal - false, //no-wait - nil, //args + cs.exchange, //name + cs.exchangeType, //kind + true, //durable + false, //auto-delete + false, //internal + false, //no-wait + nil, //args ) _, err = channel.QueueDeclare( cs.queue, diff --git a/services/templeton/src/templeton/main.go b/services/templeton/src/templeton/main.go index 737d39e4d..93fc31afe 100644 --- a/services/templeton/src/templeton/main.go +++ b/services/templeton/src/templeton/main.go @@ -3,6 +3,7 @@ package main import ( "flag" "logcabin" + "messaging" "configurate" "fmt" @@ -10,6 +11,8 @@ import ( "templeton/database" "templeton/elasticsearch" + + "github.com/streadway/amqp" ) var ( @@ -133,11 +136,36 @@ func main() { loadAMQPConfig() + client, err := messaging.NewClient(amqpURI, true) + if err != nil { + logger.Fatal(err) + } + defer client.Close() + if *mode == "periodic" { logger.Println("Periodic indexing mode selected.") - // TODO: AMQP listener triggering same steps as full mode - return + go client.Listen() + + // Accept and handle messages sent out with the index.all and index.templates routing keys + client.AddConsumer(messaging.ReindexExchange, "direct", "templeton.reindexAll", messaging.ReindexAllKey, func(del amqp.Delivery) { + es.Reindex(d) + del.Ack(false) + }) + client.AddConsumer(messaging.ReindexExchange, "direct", "templeton.reindexTemplates", messaging.ReindexTemplatesKey, func(del amqp.Delivery) { + es.Reindex(d) + del.Ack(false) + }) + + // spinner in order to keep the program running since client.Listen() is in a goroutine. 
+ spinner := make(chan int) + for { + select { + case <-spinner: + fmt.Println("Exiting") + break + } + } } if *mode == "incremental" { From 5c0092e6e8729943a4c825eeb7fe8fc71daee733 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Thu, 3 Mar 2016 16:55:17 -0700 Subject: [PATCH 100/183] CORE-7445 removing debug statements. --- .../views/dialogs/SimpleFileUploadDialog.java | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java index 022818ad0..97c75ae37 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/diskResource/client/views/dialogs/SimpleFileUploadDialog.java @@ -55,8 +55,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.logging.Level; -import java.util.logging.Logger; /** * @author jstroot @@ -117,8 +115,6 @@ interface SimpleFileUploadPanelUiBinder extends UiBinder"); - public SimpleFileUploadDialog(final HasPath uploadDest, final DiskResourceServiceFacade drService, final EventBus eventBus, @@ -255,8 +251,6 @@ void onSubmitComplete(SubmitCompleteEvent event) { IPCFileUploadField field = fufList.get(formList.indexOf(event.getSource())); String results2 = event.getResults(); - GWT.log("upload result->" + results2); - LOG.log(Level.SEVERE, "\nUpload result -->" + results2 + "<----\n"); if (Strings.isNullOrEmpty(results2)) { IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList( @@ -265,23 +259,18 @@ void onSubmitComplete(SubmitCompleteEvent event) { String results = Format.stripTags(results2); Splittable split = StringQuoter.split(results); - LOG.log(Level.SEVERE, "\nUpload split -->" + results + "<---\n"); - if (split == null) { - LOG.log(Level.SEVERE, "\n--->Upload split null-->\n"); IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed(Lists.newArrayList( field.getValue())))); } else { if (split.isUndefined("file") || (split.get("file") == null)) { - LOG.log(Level.SEVERE, "\n--->Upload split file empty-->\n"); field.markInvalid(appearance.fileUploadsFailed(Lists.newArrayList(field.getValue()))); IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed( Lists.newArrayList(field.getValue())))); } else { - LOG.log(Level.SEVERE, "\n--->Upload split not empty -->\n"); - eventBus.fireEvent(new FileUploadedEvent(uploadDest, field.getValue(), results)); + eventBus.fireEvent(new FileUploadedEvent(uploadDest, field.getValue(), results)); } } } @@ -379,10 +368,8 @@ public void onSubmit(SubmitEvent event) { }); try { form.submit(); - LOG.log(Level.SEVERE, "\nUpload submitted!\n"); } catch(Exception e ) { - GWT.log("\nxpcetion on submit\n" + e.getMessage()); - LOG.log(Level.SEVERE, "\nUpload exception!\n"); + GWT.log("\nexception on submit\n" + e.getMessage()); IplantAnnouncer.getInstance() .schedule(new ErrorAnnouncementConfig(appearance.fileUploadsFailed( Lists.newArrayList(field.getValue())))); From 13e22aa177c14b0521eabe763efb95d813dfc8d0 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 3 Mar 2016 18:26:20 -0700 Subject: [PATCH 101/183] String is already parsed, don't reparse it. 
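
The consumer already hands consume-msg the payload decoded to a string, so
wrapping it in the String constructor again is at best redundant and in fact
fails at runtime, since java.lang.String has no (String, String) constructor.
A small Go rendering of the decode-once rule this fix restores (the function
name and routing key are illustrative):

    package main

    import "fmt"

    // logMsg converts the raw delivery body to a string exactly once, at the
    // boundary where the bytes arrive; nothing downstream re-decodes it.
    func logMsg(routingKey string, raw []byte) {
    	msg := string(raw)
    	fmt.Printf("[curation/consume-msg] [%s] [%s]\n", routingKey, msg)
    }

    func main() {
    	logMsg("collection.add", []byte(`{"entity": "/zone/home/example"}`))
    }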
--- services/dewey/src/dewey/curation.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/dewey/src/dewey/curation.clj b/services/dewey/src/dewey/curation.clj index 803e0b553..8039c597a 100644 --- a/services/dewey/src/dewey/curation.clj +++ b/services/dewey/src/dewey/curation.clj @@ -309,7 +309,7 @@ Throws: It throws any exception perculating up from below." [irods-cfg es routing-key msg] - (log/info (format "[curation/consume-msg] [%s] [%s]" routing-key (String. msg "UTF-8"))) + (log/info (format "[curation/consume-msg] [%s] [%s]" routing-key msg)) (if-let [consume (resolve-consumer routing-key)] (try+ (irods/with-jargon irods-cfg [irods] From b9ec9e9e52e387417f59294f378a9cbbf9799ba6 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 3 Mar 2016 18:35:35 -0700 Subject: [PATCH 102/183] Require ring.adapter.jetty before trying to use it in dewey. --- services/dewey/src/dewey/status.clj | 1 + 1 file changed, 1 insertion(+) diff --git a/services/dewey/src/dewey/status.clj b/services/dewey/src/dewey/status.clj index ce3f6003a..c45d3b860 100644 --- a/services/dewey/src/dewey/status.clj +++ b/services/dewey/src/dewey/status.clj @@ -19,4 +19,5 @@ (defn start-jetty [listen-port] + (require 'ring.adapter.jetty) ((eval 'ring.adapter.jetty/run-jetty) #'dewey-handler {:port listen-port})) From 6d37c9417ca144b9e18db7d2e8d36ab16daedc24 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 3 Mar 2016 19:27:05 -0700 Subject: [PATCH 103/183] terrain tags search: zone-qualify the user for filter-user-tags, as it expects --- services/terrain/src/terrain/services/search.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/terrain/src/terrain/services/search.clj b/services/terrain/src/terrain/services/search.clj index 82a206514..9eee538f2 100644 --- a/services/terrain/src/terrain/services/search.clj +++ b/services/terrain/src/terrain/services/search.clj @@ -193,7 +193,7 @@ [user tags-str] (try+ (if tags-str - (search/filter-user-tags user (map #(UUID/fromString %) (string/split tags-str #","))) + (search/filter-user-tags (str user "#iplant") (map #(UUID/fromString %) (string/split tags-str #","))) []) (catch Throwable _ (throw+ {:type :invalid-argument From 18727902b550652efb4cb80f97b0c411c51bd87a Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 3 Mar 2016 19:31:28 -0700 Subject: [PATCH 104/183] Use qualify-name rather than hardcoding the zone name. --- services/terrain/src/terrain/services/search.clj | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/services/terrain/src/terrain/services/search.clj b/services/terrain/src/terrain/services/search.clj index 9eee538f2..1d6a3fba6 100644 --- a/services/terrain/src/terrain/services/search.clj +++ b/services/terrain/src/terrain/services/search.clj @@ -189,11 +189,18 @@ :val query-str})))) + + +(defn- qualify-name + "Qualifies a user or group name with the default zone." + [name] + (str name \# (cfg/irods-zone))) + (defn- extract-tags [user tags-str] (try+ (if tags-str - (search/filter-user-tags (str user "#iplant") (map #(UUID/fromString %) (string/split tags-str #","))) + (search/filter-user-tags (qualify-name user) (map #(UUID/fromString %) (string/split tags-str #","))) []) (catch Throwable _ (throw+ {:type :invalid-argument @@ -201,13 +208,6 @@ :arg "tags" :val tags-str})))) - -(defn- qualify-name - "Qualifies a user or group name with the default zone." 
-  [name]
-  (str name \# (cfg/irods-zone)))
-
-
 (defn add-timing
   [result start]
   (let [curr-time  (l/local-now)
From a0ab28268275367283d3da8de789fd1fc1d8b4db Mon Sep 17 00:00:00 2001
From: Ian McEwen
Date: Thu, 3 Mar 2016 19:52:01 -0700
Subject: [PATCH 105/183] Switch query/filtered to query/bool, as recommended by Elasticsearch's deprecation notice for the 'filtered' clause.

---
 services/terrain/src/terrain/persistence/search.clj | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/services/terrain/src/terrain/persistence/search.clj b/services/terrain/src/terrain/persistence/search.clj
index e5a00cea3..44b006046 100644
--- a/services/terrain/src/terrain/persistence/search.clj
+++ b/services/terrain/src/terrain/persistence/search.clj
@@ -103,7 +103,7 @@
    It returns the subset of the given tags that are owned by the given user."
   [^String user ^ISeq tag-ids]
   (try+
-    (let [query (query/filtered :query (query/term :id tag-ids) :filter (query/term :creator user))
+    (let [query (query/bool :must (query/term :id tag-ids) :filter (query/term :creator user))
          hits  (resp/hits-from (doc/search (connect) "data" "tag" :query query :_source false))]
      (map :_id hits))
    (catch [:status 404] {:keys []}
@@ -141,7 +141,7 @@
   Returns:
     It returns the elastisch formatted query filtered for tags and user access."
  [^IPersistentMap query ^ISeq in-folders ^ISeq tags ^ISeq memberships]
-  (query/filtered :query query :filter (mk-filter in-folders tags memberships)))
+  (query/bool :must query :filter (mk-filter in-folders tags memberships)))

(defn ^IPersistentMap mk-data-tags-filter
@@ -158,7 +158,7 @@
   Returns:
     It returns the elastisch formatted filter for tags and user access."
  [^ISeq in-folders ^ISeq tags ^ISeq memberships]
-  (query/filtered :filter (mk-filter in-folders tags memberships)))
+  (query/bool :filter (mk-filter in-folders tags memberships)))

(defn- format-response
From c5da5fb939050c5ad51abd9c906b6e9e45d9a206 Mon Sep 17 00:00:00 2001
From: Ian McEwen
Date: Thu, 3 Mar 2016 20:09:40 -0700
Subject: [PATCH 106/183] [terms] query does not support [cache] within lookup element

---
 services/terrain/src/terrain/persistence/search.clj | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/services/terrain/src/terrain/persistence/search.clj b/services/terrain/src/terrain/persistence/search.clj
index 44b006046..399008738 100644
--- a/services/terrain/src/terrain/persistence/search.clj
+++ b/services/terrain/src/terrain/persistence/search.clj
@@ -117,8 +117,7 @@
                           (str path \/))))
        filter-tag    (fn [tag] (query/term :id {:type "tag"
                                                 :id   tag
-                                                :path "targets.id"
-                                                :cache false}))
+                                                :path "targets.id"}))
        perm-filter   (query/nested :path   "userPermissions"
                                    :filter (query/term "userPermissions.user" memberships))]
    (query/bool :must   (query/bool :must perm-filter :should (map filter-path in-folders))
From b9503061ad9258856d0d8df338bf3c192b5d72c5 Mon Sep 17 00:00:00 2001
From: Ian McEwen
Date: Fri, 4 Mar 2016 13:58:01 -0700
Subject: [PATCH 107/183] Make dewey's queue declaration not auto-delete.

Durability means very little without this, since it would only be durable in
the case of being created and the broker restarted before anything connected
to it.
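
For reference, the same declaration expressed with the streadway/amqp Go
client used elsewhere in this repo; the broker URL and queue name here are
placeholders:

    package main

    import "github.com/streadway/amqp"

    func main() {
    	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
    	if err != nil {
    		panic(err)
    	}
    	defer conn.Close()

    	ch, err := conn.Channel()
    	if err != nil {
    		panic(err)
    	}

    	// Durable and not auto-deleted: the queue survives broker restarts
    	// and keeps buffering messages while no consumer is attached.
    	if _, err = ch.QueueDeclare("indexing", true, false, false, false, nil); err != nil {
    		panic(err)
    	}
    }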
--- services/dewey/src/dewey/amq.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/dewey/src/dewey/amq.clj b/services/dewey/src/dewey/amq.clj index aa8ea9798..82eb68c8f 100644 --- a/services/dewey/src/dewey/amq.clj +++ b/services/dewey/src/dewey/amq.clj @@ -41,7 +41,7 @@ delivery-fn)))] (lb/qos channel qos) (le/topic channel exchange-name :durable exchange-durable :auto-delete exchange-auto-delete) - (lq/declare channel queue :durable true) + (lq/declare channel queue :durable true :auto-delete false :exclusive false) (doseq [topic topics] (lq/bind channel queue exchange-name :routing-key topic)) (lb/consume channel queue consumer :auto-ack false))) From b9503061ad9258856d0d8df338bf3c192b5d72c5 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Thu, 3 Mar 2016 19:10:14 -0700 Subject: [PATCH 108/183] CORE-7546: register newly submitted analyses in Grouper and give the submitter ownership permission --- .../apps/src/apps/clients/iplant_groups.clj | 33 +++++++++++++++---- services/apps/src/apps/service/apps/jobs.clj | 6 +++- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/services/apps/src/apps/clients/iplant_groups.clj b/services/apps/src/apps/clients/iplant_groups.clj index d4d6a5cc0..83e727069 100644 --- a/services/apps/src/apps/clients/iplant_groups.clj +++ b/services/apps/src/apps/clients/iplant_groups.clj @@ -15,18 +15,32 @@ (def grouper-user-group-fmt "iplant:de:%s:users:de-users") (def grouper-app-permission-def-fmt "iplant:de:%s:apps:app-permission-def") (def grouper-app-resource-name-fmt "iplant:de:%s:apps:%s") +(def grouper-analysis-permission-def-fmt "iplant:de:%s:analyses:analysis-permission-def") +(def grouper-analysis-resource-name-fmt "iplant:de:%s:analyses:%s") (defn- grouper-user-group [] (format grouper-user-group-fmt (config/env-name))) -(defn- grouper-app-permission-def - [] - (format grouper-app-permission-def-fmt (config/env-name))) +(defn- grouper-permission-def + [fmt] + (format fmt (config/env-name))) -(defn- grouper-app-resource-name - [app-id] - (format grouper-app-resource-name-fmt (config/env-name) app-id)) +(def ^:private grouper-app-permission-def + (partial grouper-permission-def grouper-app-permission-def-fmt)) + +(def ^:private grouper-analysis-permission-def + (partial grouper-permission-def grouper-analysis-permission-def-fmt)) + +(defn- grouper-resource-name + [fmt id] + (format fmt (config/env-name) id)) + +(def ^:private grouper-app-resource-name + (partial grouper-resource-name grouper-app-resource-name-fmt)) + +(def ^:private grouper-analysis-resource-name + (partial grouper-resource-name grouper-analysis-resource-name-fmt)) (defn- grouper-url [& components] @@ -206,3 +220,10 @@ (let [reason (get-error-reason body status)] (log/error (str "unable to unshare " app-id " with " subject-id ": " reason))) "the app unsharing request failed"))) + +(defn register-analysis + "Registers a new analysis in Grouper." 
+ [user analysis-id] + (let [analysis-resource-name (grouper-analysis-resource-name analysis-id)] + (create-resource analysis-resource-name (grouper-analysis-permission-def)) + (grant-role-user-permission user (grouper-user-group) analysis-resource-name "own"))) diff --git a/services/apps/src/apps/service/apps/jobs.clj b/services/apps/src/apps/service/apps/jobs.clj index 4622fd2e3..571351ed6 100644 --- a/services/apps/src/apps/service/apps/jobs.clj +++ b/services/apps/src/apps/service/apps/jobs.clj @@ -4,6 +4,7 @@ (:require [clojure.tools.logging :as log] [clojure.string :as string] [kameleon.db :as db] + [apps.clients.iplant-groups :as iplant-groups] [apps.clients.notifications :as cn] [apps.persistence.jobs :as jp] [apps.service.apps.job-listings :as listings] @@ -182,4 +183,7 @@ (defn submit [apps-client user submission] - (transaction (submissions/submit apps-client user submission))) + (transaction + (let [job-info (submissions/submit apps-client user submission)] + (iplant-groups/register-analysis (:shortUsername user) (:id job-info)) + job-info))) From c58332859f02c03f543f893e696e6ae982db7abc Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 7 Mar 2016 17:27:08 -0700 Subject: [PATCH 109/183] CORE-7552: validate UUIDs refer to extant objects in stat-gatherer endpoint --- services/data-info/src/data_info/services/stat.clj | 1 + services/data-info/src/data_info/util/validators.clj | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/services/data-info/src/data_info/services/stat.clj b/services/data-info/src/data_info/services/stat.clj index fff60720a..693f6e109 100644 --- a/services/data-info/src/data_info/services/stat.clj +++ b/services/data-info/src/data_info/services/stat.clj @@ -98,6 +98,7 @@ [{user :user validation :validation-behavior} {paths :paths uuids :ids}] (with-jargon (cfg/jargon-cfg) [cm] (validators/user-exists cm user) + (validators/all-uuids-exist cm uuids) (let [uuid-paths (map (juxt (comp keyword str) (partial uuid/get-path cm)) uuids) all-paths (into paths (map second uuid-paths))] (validators/all-paths-exist cm all-paths) diff --git a/services/data-info/src/data_info/util/validators.clj b/services/data-info/src/data_info/util/validators.clj index fe205136d..be9383220 100644 --- a/services/data-info/src/data_info/util/validators.clj +++ b/services/data-info/src/data_info/util/validators.clj @@ -8,6 +8,7 @@ [clj-jargon.item-info :as item] [clj-jargon.permissions :as perm] [clj-jargon.users :as user] + [clj-jargon.by-uuid :as uuid] [clojure-commons.error-codes :as error] [data-info.util.config :as cfg]) (:import [clojure.lang IPersistentCollection])) @@ -215,3 +216,9 @@ (throw+ {:error_code error/ERR_NOT_OWNER :user user :paths (filterv #(not (belongs-to? %)) paths)})))) + +(defn all-uuids-exist + [cm uuids] + (when-not (every? 
#(uuid/get-path cm %) uuids) + (throw+ {:error_code error/ERR_DOES_NOT_EXIST + :ids (filterv #(not (uuid/get-path cm %1)) uuids)}))) From 51f1ee1a39b1bd89f0b2b4418ea2c07e6d81349f Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 4 Mar 2016 17:13:30 -0700 Subject: [PATCH 110/183] CORE-7548: refactored the app permission code a little to facilitate code reuse --- .../apps/src/apps/clients/iplant_groups.clj | 24 +++++++++++++++++++ .../src/apps/service/apps/de/listings.clj | 2 +- .../src/apps/service/apps/de/permissions.clj | 18 ++------------ .../apps/src/apps/service/apps/de/sharing.clj | 4 ++-- 4 files changed, 29 insertions(+), 19 deletions(-) diff --git a/services/apps/src/apps/clients/iplant_groups.clj b/services/apps/src/apps/clients/iplant_groups.clj index 83e727069..40d21e7a3 100644 --- a/services/apps/src/apps/clients/iplant_groups.clj +++ b/services/apps/src/apps/clients/iplant_groups.clj @@ -18,6 +18,30 @@ (def grouper-analysis-permission-def-fmt "iplant:de:%s:analyses:analysis-permission-def") (def grouper-analysis-resource-name-fmt "iplant:de:%s:analyses:%s") +(def ^:private permission-precedence + (into {} (map-indexed (fn [i v] (vector v i)) ["own" "write" "read"]))) + +(defn get-permission-level + ([perms id] + (get-permission-level (perms id))) + ([perms] + (first (sort-by permission-precedence (map :action_name perms))))) + +(defn has-permission-level + [perms required-level id] + (some (comp (partial = required-level) :action_name) (perms id))) + +(def lacks-permission-level (complement has-permission-level)) + +(defn format-permission + [[subject subject-perms]] + {:user subject + :permission (get-permission-level subject-perms)}) + +(defn find-forbidden-resources + [perms required-level ids] + (filter (partial lacks-permission-level perms required-level) ids)) + (defn- grouper-user-group [] (format grouper-user-group-fmt (config/env-name))) diff --git a/services/apps/src/apps/service/apps/de/listings.clj b/services/apps/src/apps/service/apps/de/listings.clj index 64fda7e4a..dd6d55fe9 100644 --- a/services/apps/src/apps/service/apps/de/listings.clj +++ b/services/apps/src/apps/service/apps/de/listings.clj @@ -216,7 +216,7 @@ For the time being we'll deal with that by defaulting the permission level to the empty string, indicating that the user has no explicit permissions on the app." [app perms] - (assoc app :permission (or (perms/get-permission-level perms (:id app)) ""))) + (assoc app :permission (or (iplant-groups/get-permission-level perms (:id app)) ""))) (defn- format-app-listing "Formats certain app fields into types more suitable for the client." 
diff --git a/services/apps/src/apps/service/apps/de/permissions.clj b/services/apps/src/apps/service/apps/de/permissions.clj index ed8024394..806ce2058 100644 --- a/services/apps/src/apps/service/apps/de/permissions.clj +++ b/services/apps/src/apps/service/apps/de/permissions.clj @@ -8,24 +8,10 @@ [clojure.string :as string] [clojure-commons.exception-util :as cxu])) -(def permission-precedence (into {} (map-indexed (fn [i v] (vector v i)) ["own" "write" "read"]))) - -(defn get-permission-level - ([perms app-id] - (get-permission-level (perms app-id))) - ([perms] - (first (sort-by permission-precedence (map :action_name perms))))) - -(defn has-permission-level - [perms required-level app-id] - (some (comp (partial = required-level) :action_name) (perms app-id))) - -(def lacks-permission-level (complement has-permission-level)) - (defn check-app-permissions [user required-level app-ids] (let [perms (iplant-groups/load-app-permissions user app-ids)] - (when-let [forbidden-apps (seq (filter (partial lacks-permission-level perms required-level) app-ids))] + (when-let [forbidden-apps (seq (iplant-groups/find-forbidden-resources perms required-level app-ids))] (cxu/forbidden (str "insufficient privileges for apps: " (string/join ", " forbidden-apps)))))) (defn load-app-permissions @@ -39,7 +25,7 @@ (defn- format-app-permissions [user perms app-names app-id] (->> (group-by (comp string/lower-case :id :subject) (perms app-id)) - (map (fn [[subject subject-perms]] {:user subject :permission (get-permission-level subject-perms)})) + (map iplant-groups/format-permission) (remove (comp (partial = user) :user)) (hash-map :id (str app-id) :name (apps-util/get-app-name app-names app-id) :permissions))) diff --git a/services/apps/src/apps/service/apps/de/sharing.clj b/services/apps/src/apps/service/apps/de/sharing.clj index aab7066fb..314f4c476 100644 --- a/services/apps/src/apps/service/apps/de/sharing.clj +++ b/services/apps/src/apps/service/apps/de/sharing.clj @@ -23,7 +23,7 @@ [user sharee app-id level success-fn failure-fn perms] (let [sharer-category (listings/get-category-id-for-app user app-id) sharee-category listings/shared-with-me-id] - (if (perms/lacks-permission-level perms "own" app-id) + (if (iplant-groups/lacks-permission-level perms "own" app-id) (failure-fn sharer-category sharee-category (app-sharing-msg :not-allowed app-id)) (if-let [failure-reason (iplant-groups/share-app app-id sharee level)] (failure-fn sharer-category sharee-category failure-reason) @@ -52,7 +52,7 @@ (defn- unshare-accessible-app [user sharee app-id success-fn failure-fn perms] (let [sharer-category (listings/get-category-id-for-app user app-id)] - (if (perms/lacks-permission-level perms "own" app-id) + (if (iplant-groups/lacks-permission-level perms "own" app-id) (failure-fn sharer-category (app-sharing-msg :not-allowed app-id)) (if-let [failure-reason (iplant-groups/unshare-app app-id sharee)] (failure-fn sharer-category failure-reason) From 7aac0e60cb21badf45dd4907498a4ae9e4757c0b Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 7 Mar 2016 17:05:25 -0700 Subject: [PATCH 111/183] CORE-7548: implement the POST /analyses/permission-lister endpoint in apps --- .../src/clojure_commons/exception_util.clj | 5 +++ .../apps/src/apps/clients/iplant_groups.clj | 29 +++++++++++--- services/apps/src/apps/persistence/jobs.clj | 7 ++++ services/apps/src/apps/protocols.clj | 3 +- services/apps/src/apps/routes/analyses.clj | 11 +++++ .../src/apps/routes/domain/permission.clj | 11 +++++ 
services/apps/src/apps/service/apps.clj | 4 ++
 services/apps/src/apps/service/apps/agave.clj | 7 +++-
 .../apps/src/apps/service/apps/combined.clj | 5 ++-
 services/apps/src/apps/service/apps/de.clj | 5 ++-
 services/apps/src/apps/service/apps/jobs.clj | 40 +++++++++++++++++++
 11 files changed, 117 insertions(+), 10 deletions(-)

diff --git a/libs/iplant-clojure-commons/src/clojure_commons/exception_util.clj b/libs/iplant-clojure-commons/src/clojure_commons/exception_util.clj
index e55bc4953..be29701c8 100644
--- a/libs/iplant-clojure-commons/src/clojure_commons/exception_util.clj
+++ b/libs/iplant-clojure-commons/src/clojure_commons/exception_util.clj
@@ -16,3 +16,8 @@
   "Throws an error indicating that there was an attempt to create something that already exists."
   [reason & {:as ex-info}]
   (throw+ (assoc ex-info :type ::cx/exists :error reason)))
+
+(defn bad-request
+  "Throws an error indicating that a bad request was received."
+  [reason & {:as ex-info}]
+  (throw+ (assoc ex-info :type ::cx/bad-request :error reason)))
diff --git a/services/apps/src/apps/clients/iplant_groups.clj b/services/apps/src/apps/clients/iplant_groups.clj
index 40d21e7a3..3b38ea7d8 100644
--- a/services/apps/src/apps/clients/iplant_groups.clj
+++ b/services/apps/src/apps/clients/iplant_groups.clj
@@ -86,7 +86,7 @@
   (http/put (grouper-url "groups" (grouper-user-group) "members" subject-id)
             {:query-params {:user grouper-user}}))

-(defn- retrieve-permissions
+(defn- retrieve-permissions*
   "Retrieves permission assignments from Grouper."
   [role subject attribute-def attribute-def-names]
   (->> {:user grouper-user
@@ -100,15 +100,27 @@
        :body
        :assignments))

+(defn- retrieve-permissions
+  "Retrieves permission assignments from Grouper."
+  [role subject ids get-attribute-def to-attribute-resource-name]
+  (retrieve-permissions* role subject (get-attribute-def) (map to-attribute-resource-name ids)))
+
 (defn- retrieve-app-permissions
   "Retrieves app permission assignments from Grouper."
   ([subject app-ids]
    (retrieve-app-permissions nil subject app-ids))
   ([role subject app-ids]
-   (retrieve-permissions role subject (grouper-app-permission-def) (map grouper-app-resource-name app-ids))))
+   (retrieve-permissions role subject app-ids grouper-app-permission-def grouper-app-resource-name)))
+
+(defn- retrieve-analysis-permissions
+  "Retrieves analysis permission assignments from Grouper."
+  ([subject analysis-ids]
+   (retrieve-analysis-permissions nil subject analysis-ids))
+  ([role subject analysis-ids]
+   (retrieve-permissions role subject analysis-ids grouper-analysis-permission-def grouper-analysis-resource-name)))

-(defn- group-app-permissions
-  "Groups app permissions by app ID."
+(defn- group-permissions
+  "Groups permissions by resource ID. The resource ID must be a UUID."
   [perms]
   (group-by (comp uuidify id-from-resource :name :attribute_definition_name) perms))

@@ -117,12 +129,17 @@
   ([user]
    (load-app-permissions user nil))
   ([user app-ids]
-   (group-app-permissions (retrieve-app-permissions user app-ids))))
+   (group-permissions (retrieve-app-permissions user app-ids))))

 (defn list-app-permissions
   "Loads an app permission listing from Grouper."
   [app-ids]
-  (group-app-permissions (retrieve-app-permissions nil app-ids)))
+  (group-permissions (retrieve-app-permissions nil app-ids)))
+
+(defn list-analysis-permissions
+  "Loads an analysis permission listing from Grouper."
+  [analysis-ids]
+  (group-permissions (retrieve-analysis-permissions nil analysis-ids)))

 (defn- create-resource
   "Creates a new permission name in grouper."
diff --git a/services/apps/src/apps/persistence/jobs.clj b/services/apps/src/apps/persistence/jobs.clj
index 04c61e846..d436c3660 100644
--- a/services/apps/src/apps/persistence/jobs.clj
+++ b/services/apps/src/apps/persistence/jobs.clj
@@ -281,6 +281,13 @@
     (limit (nil-if-zero row-limit))
     (select)))

+(defn list-jobs-by-id
+  "Gets a listing of jobs with the given identifiers."
+  [job-ids]
+  (-> (select* (job-base-query))
+      (where {:j.id [in (map uuidify job-ids)]})
+      (select)))
+
 (defn list-child-jobs
   "Lists the child jobs within a batch job."
   [batch-id]
diff --git a/services/apps/src/apps/protocols.clj b/services/apps/src/apps/protocols.clj
index 98371663d..95e36e63c 100644
--- a/services/apps/src/apps/protocols.clj
+++ b/services/apps/src/apps/protocols.clj
@@ -67,4 +67,5 @@
   (shareAppWithUser [_ app-names sharee app-id level])
   (unshareApps [_ unsharing-requests])
   (unshareAppsWithUser [_ app-names sharee app-ids])
-  (unshareAppWithUser [_ app-names sharee app-id]))
+  (unshareAppWithUser [_ app-names sharee app-id])
+  (supportsJobSharing [_ _]))
diff --git a/services/apps/src/apps/routes/analyses.clj b/services/apps/src/apps/routes/analyses.clj
index 9e2a239d7..e352f07cd 100644
--- a/services/apps/src/apps/routes/analyses.clj
+++ b/services/apps/src/apps/routes/analyses.clj
@@ -9,6 +9,7 @@
         [ring.util.http-response :only [ok]])
   (:require [apps.json :as json]
             [apps.service.apps :as apps]
+            [apps.routes.domain.permission :as perms]
             [apps.util.coercions :as coercions]))

 (defroutes* analyses
@@ -38,6 +39,16 @@
     (ok (coerce! AnalysisResponse
                  (apps/submit-job current-user body))))

+  (POST* "/permission-lister" []
+    :query [params SecuredQueryParams]
+    :body [body (describe perms/AnalysisIdList "The analysis permission listing request.")]
+    :return perms/AnalysisPermissionListing
+    :summary "List Analysis Permissions"
+    :description "This endpoint allows the caller to list the permissions for one or more analyses.
+    The authenticated user must have read permission on every analysis in the request body for
+    this endpoint to succeed."
+ (ok (apps/list-job-permissions current-user (:analyses body)))) + (PATCH* "/:analysis-id" [] :path-params [analysis-id :- AnalysisIdPathParam] :query [params SecuredQueryParams] diff --git a/services/apps/src/apps/routes/domain/permission.clj b/services/apps/src/apps/routes/domain/permission.clj index 7a01b9c1e..3c5889953 100644 --- a/services/apps/src/apps/routes/domain/permission.clj +++ b/services/apps/src/apps/routes/domain/permission.clj @@ -63,3 +63,14 @@ (defschema AppUnsharingResponse {:unsharing (describe [UserAppUnsharingResponseElement] "The list of app unsharing responses")}) + +(defschema AnalysisIdList + {:analyses (describe [UUID] "A List of analysis IDs")}) + +(defschema AnalysisPermissionListElement + {:id (describe UUID "The analysis ID") + :name (describe NonBlankString "The analysis name") + :permissions (describe [UserPermissionListElement] "The list of user permissions for the analysis")}) + +(defschema AnalysisPermissionListing + {:analyses (describe [AnalysisPermissionListElement] "The list of analysis permissions")}) diff --git a/services/apps/src/apps/service/apps.clj b/services/apps/src/apps/service/apps.clj index b9db57bde..ebc973166 100644 --- a/services/apps/src/apps/service/apps.clj +++ b/services/apps/src/apps/service/apps.clj @@ -356,3 +356,7 @@ (defn unshare-apps [user unsharing-requests] {:unsharing (.unshareApps (get-apps-client user) unsharing-requests)}) + +(defn list-job-permissions + [user job-ids] + (jobs/list-job-permissions (get-apps-client user) user job-ids)) diff --git a/services/apps/src/apps/service/apps/agave.clj b/services/apps/src/apps/service/apps/agave.clj index 47c4c9c2a..22f0e3c6f 100644 --- a/services/apps/src/apps/service/apps/agave.clj +++ b/services/apps/src/apps/service/apps/agave.clj @@ -17,6 +17,8 @@ (def app-permission-rejection "Cannot list or modify the permissions of HPC apps with this service") +(def analysis-permission-rejection "Cannot list or modify the permissions of HPC analyses with this service") + (defn- reject-app-permission-request [] (service/bad-request app-permission-rejection)) @@ -167,4 +169,7 @@ (when (and (user-has-access-token?) (not (util/uuid? app-id))) (let [category (.hpcAppGroup agave)] - (app-permissions/app-unsharing-failure app-names app-id category app-permission-rejection))))) + (app-permissions/app-unsharing-failure app-names app-id category app-permission-rejection)))) + + (supportsJobSharing [_ _] + false)) diff --git a/services/apps/src/apps/service/apps/combined.clj b/services/apps/src/apps/service/apps/combined.clj index d69423f1d..a1cf0b9f4 100644 --- a/services/apps/src/apps/service/apps/combined.clj +++ b/services/apps/src/apps/service/apps/combined.clj @@ -231,4 +231,7 @@ (unshareAppWithUser [self app-names sharee app-id] (or (first (remove nil? 
(map #(.unshareAppWithUser % app-names sharee app-id) clients))) - (app-permissions/app-unsharing-failure app-names app-id nil (str "app ID " app-id " does not exist"))))) + (app-permissions/app-unsharing-failure app-names app-id nil (str "app ID " app-id " does not exist")))) + + (supportsJobSharing [_ job-step] + (.supportsJobSharing (util/apps-client-for-job-step clients job-step) job-step))) diff --git a/services/apps/src/apps/service/apps/de.clj b/services/apps/src/apps/service/apps/de.clj index 2568bdd16..bdc6ca692 100644 --- a/services/apps/src/apps/service/apps/de.clj +++ b/services/apps/src/apps/service/apps/de.clj @@ -254,4 +254,7 @@ (sharing/unshare-app-with-user user sharee (uuidify app-id) (partial app-permissions/app-unsharing-success app-names app-id) - (partial app-permissions/app-unsharing-failure app-names app-id))))) + (partial app-permissions/app-unsharing-failure app-names app-id)))) + + (supportsJobSharing [_ _] + true)) diff --git a/services/apps/src/apps/service/apps/jobs.clj b/services/apps/src/apps/service/apps/jobs.clj index 571351ed6..64b883f32 100644 --- a/services/apps/src/apps/service/apps/jobs.clj +++ b/services/apps/src/apps/service/apps/jobs.clj @@ -3,6 +3,7 @@ [slingshot.slingshot :only [try+]]) (:require [clojure.tools.logging :as log] [clojure.string :as string] + [clojure-commons.exception-util :as cxu] [kameleon.db :as db] [apps.clients.iplant-groups :as iplant-groups] [apps.clients.notifications :as cn] @@ -187,3 +188,42 @@ (let [job-info (submissions/submit apps-client user submission)] (iplant-groups/register-analysis (:shortUsername user) (:id job-info)) job-info))) + +(defn- validate-job-permission-level + [short-username perms required-level job-ids] + (doseq [job-id job-ids] + (let [user-perms (filter (comp (partial = short-username) :id :subject) (perms job-id))] + (when (iplant-groups/lacks-permission-level {job-id user-perms} required-level job-id) + (cxu/forbidden (str "insufficient privileges for analysis " job-id)))))) + +(defn- validate-job-sharing-support + [apps-client job-ids] + (doseq [job-id job-ids + job-step (jp/list-job-steps job-id)] + (when-not (.supportsJobSharing apps-client job-step) + (cxu/bad-request (str "analysis sharing not supported for " job-id))))) + +(defn- validate-jobs-for-permissions + [apps-client {short-username :shortUsername} perms required-level job-ids] + (validate-job-existence job-ids) + (validate-job-permission-level short-username perms required-level job-ids) + (validate-job-sharing-support apps-client job-ids)) + +(defn- format-job-permission + [short-username perms {:keys [id job-name]}] + {:id id + :name job-name + :permissions (mapv iplant-groups/format-permission + (remove (comp (partial = short-username) key) + (group-by (comp :id :subject) (perms id))))}) + +(defn- format-job-permission-listing + [{short-username :shortUsername} perms jobs] + {:analyses (mapv (partial format-job-permission short-username perms) jobs)}) + +(defn list-job-permissions + [apps-client {:keys [username] :as user} job-ids] + (let [perms (iplant-groups/list-analysis-permissions job-ids)] + (transaction + (validate-jobs-for-permissions apps-client user perms "read" job-ids) + (format-job-permission-listing user perms (jp/list-jobs-by-id job-ids))))) From fe7522987bd9e72b306db28e6585d3856a758e6b Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 7 Mar 2016 17:58:28 -0700 Subject: [PATCH 112/183] CORE-7548: added the POST /analyses/permission-lister endpoint to terrain --- 
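
The terrain route is a thin passthrough to the apps endpoint added in the
previous patch. A sketch of exercising it once deployed; the host, port, and
analysis UUID below are placeholders, and terrain's usual secured query
parameters are omitted:

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    func main() {
    	// The body shape follows the AnalysisIdList schema: {"analyses": [<uuid>, ...]}.
    	body := strings.NewReader(`{"analyses": ["de305d54-75b4-431b-adb2-eb6b9e546014"]}`)
    	resp, err := http.Post("http://localhost:8888/analyses/permission-lister",
    		"application/json", body)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	fmt.Println(resp.Status) // expect a 200 with an {"analyses": [...]} permission listing
    }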
services/terrain/src/terrain/clients/apps/raw.clj | 9 +++++++++
 services/terrain/src/terrain/routes/metadata.clj | 3 +++
 2 files changed, 12 insertions(+)

diff --git a/services/terrain/src/terrain/clients/apps/raw.clj b/services/terrain/src/terrain/clients/apps/raw.clj
index ca75a36db..b7aefc4f5 100644
--- a/services/terrain/src/terrain/clients/apps/raw.clj
+++ b/services/terrain/src/terrain/clients/apps/raw.clj
@@ -253,6 +253,15 @@
      :as :stream
      :follow-redirects false}))

+(defn list-job-permissions
+  [body]
+  (client/post (apps-url "analyses" "permission-lister")
+               {:query-params (secured-params)
+                :content-type :json
+                :body body
+                :as :stream
+                :follow-redirects false}))
+
 (defn submit-job
   [submission]
   (client/post (apps-url "analyses")
diff --git a/services/terrain/src/terrain/routes/metadata.clj b/services/terrain/src/terrain/routes/metadata.clj
index fd0d50b8e..8b38b69d8 100644
--- a/services/terrain/src/terrain/routes/metadata.clj
+++ b/services/terrain/src/terrain/routes/metadata.clj
@@ -176,6 +176,9 @@
   (POST "/analyses" [:as {:keys [body]}]
         (service/success-response (apps/submit-job body)))

+  (POST "/analyses/permission-lister" [:as {:keys [body]}]
+        (service/success-response (apps/list-job-permissions body)))
+
   (PATCH "/analyses/:analysis-id" [analysis-id :as {body :body}]
          (service/success-response (apps/update-job analysis-id body)))

From 218a4f599afe65e7a266d8680a8710aae1111392 Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Tue, 8 Mar 2016 10:57:06 -0700
Subject: [PATCH 113/183] CORE-7548: make the permission precedence explicit

---
 services/apps/src/apps/clients/iplant_groups.clj | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/services/apps/src/apps/clients/iplant_groups.clj b/services/apps/src/apps/clients/iplant_groups.clj
index 3b38ea7d8..ef20ddfca 100644
--- a/services/apps/src/apps/clients/iplant_groups.clj
+++ b/services/apps/src/apps/clients/iplant_groups.clj
@@ -19,7 +19,9 @@
 (def grouper-analysis-resource-name-fmt "iplant:de:%s:analyses:%s")

 (def ^:private permission-precedence
-  (into {} (map-indexed (fn [i v] (vector v i)) ["own" "write" "read"])))
+  {"own"   0
+   "write" 1
+   "read"  2})

 (defn get-permission-level
From 0e2ab01e7a35c24b197cd03fe5311c34ec6a480f Mon Sep 17 00:00:00 2001
From: Ian McEwen
Date: Thu, 3 Mar 2016 19:00:54 -0700
Subject: [PATCH 114/183] Make the data index name configurable.

---
 ansible/inventories/group_vars/all | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ansible/inventories/group_vars/all b/ansible/inventories/group_vars/all
index b215fcea2..1b35cb174 100644
--- a/ansible/inventories/group_vars/all
+++ b/ansible/inventories/group_vars/all
@@ -261,6 +261,7 @@ elasticsearch:
   base: "http://{{ groups['elasticsearch'][0] }}"
   scroll_size: 1000
   cluster_name: elasticsearch
+  data_index: data
   heap_size:
   network_http_port:
   network_transport_tcp_port:
From 291a294fb238ee0a602b7426bcb07c193442bb21 Mon Sep 17 00:00:00 2001
From: Sriram Srinivasan
Date: Tue, 2 Feb 2016 12:00:45 -0700
Subject: [PATCH 115/183] CORE-7266 fix tests.
--- .../toolBar/AppsViewToolbarImplTest.java | 91 +++++++------------ 1 file changed, 35 insertions(+), 56 deletions(-) diff --git a/ui/de-lib/src/test/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImplTest.java index ea6f0abe8..2ddb19b20 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImplTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImplTest.java @@ -46,60 +46,37 @@ */ @RunWith(GxtMockitoTestRunner.class) public class AppsViewToolbarImplTest { - @Mock - AppAutoBeanFactory mockAppFactory; - @Mock - AppSearchAutoBeanFactory mockAppSearchFactory; - @Mock - AppServiceFacade mockAppService; - @Mock - UserInfo mockUserInfo; - - @Mock - MenuItem mockAppRun; - @Mock - MenuItem mockCopyApp; - @Mock - MenuItem mockCopyWf; - @Mock - MenuItem mockCreateNewApp; - @Mock - MenuItem mockCreateWorkflow; - @Mock - MenuItem mockDeleteApp; - @Mock - MenuItem mockDeleteWf; - @Mock - MenuItem mockEditApp; - @Mock - MenuItem mockEditWf; - @Mock - MenuItem mockRequestTool; - @Mock - MenuItem mockWfRun; - - @Mock - TextButton mockAppMenu; - @Mock - TextButton mockWfMenu; - - @Mock - AutoBean mockLoadConfigAb; - @Mock - AppLoadConfig mockLoadConfig; - @Mock - AppsToolbarView.AppsToolbarAppearance mockAppearance; - @Mock - PagingLoader> mockLoader; - @Mock - List currentSelectionMock; - @Mock - AppSearchField appSearchMock; @Mock Menu mockSharingMenu; @Mock MenuItem mockShareCollab, mockSharePublic; - + @Mock AppAutoBeanFactory mockAppFactory; + @Mock AppSearchAutoBeanFactory mockAppSearchFactory; + @Mock AppServiceFacade mockAppService; + @Mock UserInfo mockUserInfo; + + @Mock MenuItem mockAppRun; + @Mock MenuItem mockCopyApp; + @Mock MenuItem mockCopyWf; + @Mock MenuItem mockCreateNewApp; + @Mock MenuItem mockCreateWorkflow; + @Mock MenuItem mockDeleteApp; + @Mock MenuItem mockDeleteWf; + @Mock MenuItem mockEditApp; + @Mock MenuItem mockEditWf; + @Mock MenuItem mockRequestTool; + @Mock MenuItem mockSubmitApp; + @Mock MenuItem mockWfRun; + + @Mock TextButton mockAppMenu; + @Mock TextButton mockWfMenu; + + @Mock AutoBean mockLoadConfigAb; + @Mock AppLoadConfig mockLoadConfig; + @Mock AppsToolbarView.AppsToolbarAppearance mockAppearance; + @Mock PagingLoader> mockLoader; + @Mock List currentSelectionMock; + @Mock AppSearchField appSearchMock; private AppsViewToolbarImpl uut; @Before @@ -873,6 +850,7 @@ boolean allAppsPrivate(List apps) { when(appMock.isPublic()).thenReturn(true); when(wfMock.getAppType()).thenReturn("DE"); + currentSelectionMock = spy(new ArrayList()); uut.currentSelection = currentSelectionMock; List singleAppSelection = Lists.newArrayList(wfMock, appMock); @@ -945,13 +923,13 @@ boolean allAppsPrivate(List apps) { App wfMock = mock(App.class); when(wfMock.getStepCount()).thenReturn(1); - when(wfMock.getPermission()).thenReturn(PermissionValue.read); + when(wfMock.getPermission()).thenReturn(PermissionValue.own); when(wfMock.getAppType()).thenReturn("DE"); when(wfMock.getStepCount()).thenReturn(2); when(wfMock.isPublic()).thenReturn(false); App appMock = mock(App.class); - when(appMock.getPermission()).thenReturn(PermissionValue.read); + when(appMock.getPermission()).thenReturn(PermissionValue.own); when(appMock.getAppType()).thenReturn("DE"); when(appMock.getStepCount()).thenReturn(1); when(appMock.isPublic()).thenReturn(false); @@ -979,8 +957,8 @@ boolean allAppsPrivate(List apps) { 
verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); verify(mockSharePublic).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); - verify(mockShareCollab).setEnabled(false); + verify(mockSharingMenu).setEnabled(true); + verify(mockShareCollab).setEnabled(true); verifyNoMoreInteractions(mockAppMenu, mockWfMenu, @@ -1028,12 +1006,12 @@ boolean allAppsPrivate(List apps) { App wfMock = mock(App.class); when(wfMock.getStepCount()).thenReturn(1); - when(wfMock.getPermission()).thenReturn(PermissionValue.read); + when(wfMock.getPermission()).thenReturn(PermissionValue.own); when(wfMock.getStepCount()).thenReturn(2); when(wfMock.isPublic()).thenReturn(true); App appMock = mock(App.class); - when(appMock.getPermission()).thenReturn(PermissionValue.read); + when(appMock.getPermission()).thenReturn(PermissionValue.own); when(appMock.getAppType()).thenReturn("DE"); when(appMock.getStepCount()).thenReturn(1); when(appMock.isPublic()).thenReturn(true); @@ -1122,6 +1100,7 @@ boolean allAppsPrivate(List apps) { when(appMock.getAppType()).thenReturn("DE"); when(appMock.isPublic()).thenReturn(false); + currentSelectionMock = spy(new ArrayList()); uut.currentSelection = currentSelectionMock; List singleAppSelection = Lists.newArrayList(wfMock, appMock); From 4db30627ace9175c29828f5b65af656e50d5a7cf Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Mon, 29 Feb 2016 17:04:02 -0700 Subject: [PATCH 116/183] CORE-7152 Analysis sharing UI --- .../de/analysis/client/AnalysesView.java | 14 ++++ .../presenter/AnalysesPresenterImpl.java | 32 ++++++++- .../client/views/AnalysesToolBarImpl.java | 68 +++++++++++++++---- .../client/views/AnalysesToolBarImpl.ui.xml | 18 +++-- .../views/dialogs/AnalysisSharingDialog.java | 48 +------------ .../sharing/AnalysisSharingViewImpl.java | 63 ++++++++++++++--- .../de/analysis/shared/AnalysisModule.java | 4 ++ .../services/AnalysisServiceFacade.java | 6 ++ .../impl/AnalysisServiceFacadeImpl.java | 10 +++ .../client/analyses/AnalysesMessages.java | 12 ++++ .../analyses/AnalysesMessages.properties | 6 ++ .../AnalysesViewDefaultAppearance.java | 30 ++++++++ 12 files changed, 237 insertions(+), 74 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java index 0e3854f29..68d9a3440 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java @@ -112,10 +112,24 @@ interface Appearance { AnalysisInfoStyle css(); ImageResource shareIcon(); + + String share(); + + String shareCollab(); + + String shareSupport(); + + String shareSupportConfirm(); + + String shareWithInput(); + + String shareOutputOnly(); } interface Presenter { + void onShareSupportSelected(List currentSelection, boolean shareInputs); + interface Appearance { String analysesRetrievalFailure(); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java index 1e3261252..b1896ed6d 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java @@ -7,15 +7,21 @@ import org.iplantc.de.analysis.client.events.selection.AnalysisNameSelectedEvent; import 
org.iplantc.de.analysis.client.gin.factory.AnalysesViewFactory; import org.iplantc.de.analysis.client.presenter.proxy.AnalysisRpcProxy; +import org.iplantc.de.analysis.client.presenter.sharing.AnalysisSharingPresenter; import org.iplantc.de.analysis.client.views.AnalysisStepsView; import org.iplantc.de.analysis.client.views.dialogs.AnalysisSharingDialog; import org.iplantc.de.analysis.client.views.dialogs.AnalysisStepsInfoDialog; +import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingView; +import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingViewImpl; import org.iplantc.de.analysis.client.views.widget.AnalysisSearchField; import org.iplantc.de.client.events.EventBus; import org.iplantc.de.client.events.diskResources.OpenFolderEvent; import org.iplantc.de.client.models.analysis.Analysis; import org.iplantc.de.client.models.analysis.AnalysisStepsInfo; import org.iplantc.de.client.services.AnalysisServiceFacade; +import org.iplantc.de.client.sharing.SharingPresenter; +import org.iplantc.de.client.util.JsonUtil; +import org.iplantc.de.collaborators.client.util.CollaboratorsUtil; import org.iplantc.de.commons.client.ErrorHandler; import org.iplantc.de.commons.client.info.ErrorAnnouncementConfig; import org.iplantc.de.commons.client.info.IplantAnnouncer; @@ -177,6 +183,16 @@ public void onSuccess(Void result) { @Inject Provider aSharingDialogProvider; + @Inject + CollaboratorsUtil collaboratorsUtil; + @Inject + JsonUtil jsonUtil; + + private SharingPresenter sharingPresenter; + private AnalysisSharingView sharingView; + + + private final ListStore listStore; private final AnalysesView view; @@ -201,6 +217,12 @@ public void onSuccess(Void result) { this.view.addAnalysisNameSelectedEventHandler(this); this.view.addAnalysisAppSelectedEventHandler(this); this.view.addHTAnalysisExpandEventHandler(this); + sharingView = new AnalysisSharingViewImpl(); + } + + @Override + public void onShareSupportSelected(List currentSelection,boolean shareWithInput) { + } @Override @@ -296,7 +318,15 @@ public void onShowAllSelected() { @Override public void onShareSelected(List selected) { - aSharingDialogProvider.get().show(selected); + sharingView.setSelectedAnalysis(selected); + sharingPresenter = new AnalysisSharingPresenter(analysisService, + selected, + sharingView, + collaboratorsUtil, + jsonUtil); + AnalysisSharingDialog asd = aSharingDialogProvider.get(); + asd.setPresenter(sharingPresenter); + asd.show(selected); } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java index 1f0074541..c7d953099 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java @@ -44,6 +44,7 @@ import com.sencha.gxt.widget.core.client.button.TextButton; import com.sencha.gxt.widget.core.client.event.DialogHideEvent; import com.sencha.gxt.widget.core.client.event.SelectEvent; +import com.sencha.gxt.widget.core.client.info.Info; import com.sencha.gxt.widget.core.client.menu.Item; import com.sencha.gxt.widget.core.client.menu.MenuItem; import com.sencha.gxt.widget.core.client.selection.SelectionChangedEvent; @@ -75,8 +76,13 @@ interface AnalysesToolbarUiBinder extends UiBinder @UiField TextButton showAllTb; @UiField AnalysisSearchField searchField; @UiField(provided = true) final AnalysesView.Appearance appearance; - // @UiField - // 
MenuItem shareMI;
+
+    @UiField
+    TextButton share_menu;
+    @UiField
+    MenuItem shareCollabMI;
+    @UiField
+    MenuItem shareSupportMI;

     @Inject
     AsyncProvider analysisParametersDialogAsyncProvider;
@@ -168,14 +174,9 @@ public void onSelectionChanged(SelectionChangedEvent event) {
         relaunchMI.setEnabled(relaunchEnabled);
         cancelMI.setEnabled(cancelEnabled);
         deleteMI.setEnabled(deleteEnabled);
-        /**
-         * always disabled for now.
-         *
-         */
-        // shareMI.setEnabled(shareEnabled);
-        /**
-         * uncomment when feature is needed.
-         */
+        share_menu.setEnabled(shareEnabled);
+        shareCollabMI.setEnabled(shareEnabled);
+        shareSupportMI.setEnabled(shareEnabled);
         renameMI.setEnabled(renameEnabled);
         updateCommentsMI.setEnabled(updateCommentsEnabled);
     }
@@ -217,6 +218,12 @@ protected void onEnsureDebugId(String baseID) {
         refreshTb.ensureDebugId(baseID + AnalysisModule.Ids.BUTTON_REFRESH);
         searchField.ensureDebugId(baseID + AnalysisModule.Ids.FIELD_SEARCH);
+
+        //share menu
+        share_menu.ensureDebugId(baseID + AnalysisModule.Ids.SHARE_MENU);
+        shareCollabMI.ensureDebugId(baseID + AnalysisModule.Ids.SHARE_COLLAB);
+        shareSupportMI.ensureDebugId(baseID + AnalysisModule.Ids.SHARE_SUPPORT);
+
     }

     /**
@@ -397,8 +404,41 @@ void onShowAllSelected(SelectEvent event) {
     }

     //
-    /**
-     * @UiHandler("shareMI") void onShareSelected(SelectionEvent event) {
-     * presenter.onShareSelected(currentSelection); }
-     **/
+
+    @UiHandler("shareCollabMI")
+    void onShareSelected(SelectionEvent event) {
+        presenter.onShareSelected(currentSelection);
+    }
+
+    @UiHandler("shareSupportMI")
+    void onShareSupportSelected(SelectionEvent event) {
+        ConfirmMessageBox messageBox = new ConfirmMessageBox(appearance.shareSupport(),
+                                                             appearance.shareSupportConfirm());
+        messageBox.setPredefinedButtons(Dialog.PredefinedButton.YES,
+                                        Dialog.PredefinedButton.NO,
+                                        Dialog.PredefinedButton.CANCEL);
+        messageBox.getButton(Dialog.PredefinedButton.YES).setText(appearance.shareWithInput());
+        messageBox.getButton(Dialog.PredefinedButton.NO).setText(appearance.shareOutputOnly());
+
+        messageBox.addDialogHideHandler(new DialogHideEvent.DialogHideHandler() {
+            @Override
+            public void onDialogHide(DialogHideEvent event) {
+                switch (event.getHideButton()) {
+                    case YES:
+                        presenter.onShareSupportSelected(currentSelection, true);
+                        break;
+                    case NO:
+                        presenter.onShareSupportSelected(currentSelection, false);
+                        break;
+
+                    case CANCEL:
+                        break;
+                }
+            }
+        });
+
+        messageBox.show();
+
+    }
+
 }
diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml
index 2d504a6fd..edd9773d7 100644
--- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml
+++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml
@@ -31,10 +31,6 @@
                            text="{appearance.relaunchAnalysis}"
                            icon="{appearance.runIcon}"
                            enabled="false"/>
-
+
+
+
+
+
+
+
+
+
+
 resourcesToShare) {
-        ListStore analysisStore = new ListStore<>(new ModelKeyProvider() {
-
-            @Override
-            public String getKey(Analysis item) {
-                return item.getId();
-            }
-        });
-        AnalysisSharingView view = new AnalysisSharingViewImpl(buildAnalysisColumnModel(), analysisStore);
-        view.setSelectedAnalysis(resourcesToShare);
-        sharingPresenter = new AnalysisSharingPresenter(analysisService,
-                                                        resourcesToShare,
-                                                        view,
-                                                        collaboratorsUtil,
-                                                        jsonUtil);
         sharingPresenter.go(this);
         super.show();
     }
@@ -79,28 +56,7 @@ public void show() throws UnsupportedOperationException {
throw new UnsupportedOperationException("This method is not supported for this class. "); } - private ColumnModel buildAnalysisColumnModel() { - List> list = new ArrayList<>(); - - ColumnConfig name = new ColumnConfig<>(new ValueProvider() { - - @Override - public String getValue(Analysis object) { - return object.getName(); - } - - @Override - public void setValue(Analysis object, String value) { - // TODO Auto-generated method stub - } - - @Override - public String getPath() { - return "name"; - } - }, 180, "Name"); - list.add(name); - return new ColumnModel<>(list); + public void setPresenter(SharingPresenter sharingPresenter) { + this.sharingPresenter = sharingPresenter; } - } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/sharing/AnalysisSharingViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/sharing/AnalysisSharingViewImpl.java index 074651566..a3d3dad4a 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/sharing/AnalysisSharingViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/sharing/AnalysisSharingViewImpl.java @@ -1,21 +1,25 @@ package org.iplantc.de.analysis.client.views.sharing; import org.iplantc.de.client.models.analysis.Analysis; -import org.iplantc.de.client.models.apps.App; import org.iplantc.de.client.sharing.SharingPresenter; import com.google.gwt.core.client.GWT; import com.google.gwt.uibinder.client.UiBinder; +import com.google.gwt.uibinder.client.UiFactory; import com.google.gwt.uibinder.client.UiField; import com.google.gwt.uibinder.client.UiTemplate; import com.google.gwt.user.client.ui.Widget; +import com.sencha.gxt.core.client.ValueProvider; import com.sencha.gxt.data.shared.ListStore; +import com.sencha.gxt.data.shared.ModelKeyProvider; import com.sencha.gxt.widget.core.client.FramedPanel; import com.sencha.gxt.widget.core.client.container.VerticalLayoutContainer; +import com.sencha.gxt.widget.core.client.grid.ColumnConfig; import com.sencha.gxt.widget.core.client.grid.ColumnModel; import com.sencha.gxt.widget.core.client.grid.Grid; +import java.util.ArrayList; import java.util.List; public class AnalysisSharingViewImpl implements AnalysisSharingView { @@ -26,23 +30,26 @@ public class AnalysisSharingViewImpl implements AnalysisSharingView { interface AnalysisSharingViewUiBinder extends UiBinder { } - @UiField(provided = true) - final ColumnModel analysisColumnModel; - @UiField(provided = true) - final ListStore analysisListStore; - final Widget widget; + @UiField + ColumnModel analysisColumnModel; + + @UiField + ListStore analysisListStore; + @UiField VerticalLayoutContainer container; + @UiField FramedPanel analysisListPnl; + @UiField Grid grid; SharingPresenter presenter; - public AnalysisSharingViewImpl(ColumnModel columnModel, ListStore listStore) { - this.analysisColumnModel = columnModel; - this.analysisListStore = listStore; + final Widget widget; + + public AnalysisSharingViewImpl() { widget = uiBinder.createAndBindUi(this); } @@ -72,4 +79,42 @@ public void setSelectedAnalysis(List models) { } + @UiFactory + ColumnModel buildAnalysisColumnModel() { + List> list = new ArrayList<>(); + + ColumnConfig name = new ColumnConfig<>(new ValueProvider() { + + @Override + public String getValue(Analysis object) { + return object.getName(); + } + + @Override + public void setValue(Analysis object, String value) { + // TODO Auto-generated method stub + } + + @Override + public String getPath() { + return "name"; + } + }, 180, "Name"); + list.add(name); + return 
new ColumnModel<>(list); + } + + @UiFactory + ListStore buildAnalyisStore() { + ListStore analysisStore = new ListStore<>(new ModelKeyProvider() { + + @Override + public String getKey(Analysis item) { + return item.getId(); + } + }); + + return analysisStore; + } + } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/shared/AnalysisModule.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/shared/AnalysisModule.java index b37c43a5b..fe46077d9 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/shared/AnalysisModule.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/shared/AnalysisModule.java @@ -25,5 +25,9 @@ interface Ids { String BUTTON_REFRESH = ".refresh"; String FIELD_SEARCH = ".search"; + + String SHARE_MENU =".share"; + String SHARE_COLLAB = ".sharecollab"; + String SHARE_SUPPORT =".sharesupport"; } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java index 4c4fd2f0b..f8d550f16 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java @@ -57,4 +57,10 @@ public interface AnalysisServiceFacade { * @param callback */ void getAnalysisSteps(Analysis analysis, AsyncCallback callback); + + void shareAnalyses(List analysisList, AsyncCallback callback); + + void unshareAnalyses(List analysisList, AsyncCallback callback); + + } \ No newline at end of file diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java index 7ebe78578..f30636798 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java @@ -289,5 +289,15 @@ public void getAnalysisSteps(Analysis analysis, AsyncCallback deServiceFacade.getServiceData(wrapper, new StringAnalaysisStepInfoConverter(callback, factory)); } + + @Override + public void shareAnalyses(List analysisList, AsyncCallback callback) { + + } + + @Override + public void unshareAnalyses(List analysisList, AsyncCallback callback) { + + } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.java index 62060a8b4..ade1bb311 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.java @@ -75,4 +75,16 @@ public interface AnalysesMessages extends Messages { String stepType(); String jobId(); + + String share(); + + String shareCollab(); + + String shareSupportMi(); + + String shareSupportConfirm(); + + String shareWithInput(); + + String shareOutputOnly(); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.properties index e240a47dd..93ba0ae0c 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.properties @@ -32,3 +32,9 @@ analysesRetrievalFailure = Unable to retrieve the list 
of analyses. analysisStepInfoError = Unable to retrieve analysis information! stepType = Type jobId = Job Id +share = Share +shareCollab = Share with collaborators... +shareSupportMi = Share with support... +shareSupportConfirm = Your output folder(s) and file(s) will be shared with CyVerse support team. Do you want to continue ? +shareWithInput = Yes. Share my input files as well! +shareOutputOnly = Share output folder(s) only! diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesViewDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesViewDefaultAppearance.java index 9d59ebd01..f42e1e41d 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesViewDefaultAppearance.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesViewDefaultAppearance.java @@ -277,4 +277,34 @@ public String jobId() { public ImageResource shareIcon() { return iplantResources.share(); } + + @Override + public String share() { + return analysesMessages.share(); + } + + @Override + public String shareCollab() { + return analysesMessages.shareCollab(); + } + + @Override + public String shareSupport() { + return analysesMessages.shareSupportMi(); + } + + @Override + public String shareSupportConfirm() { + return analysesMessages.shareSupportConfirm(); + } + + @Override + public String shareWithInput() { + return analysesMessages.shareWithInput(); + } + + @Override + public String shareOutputOnly() { + return analysesMessages.shareOutputOnly(); + } } From adbc83461451fcf4568161aac361a0df7506e2eb Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 1 Mar 2016 13:13:02 -0700 Subject: [PATCH 117/183] CORE-7152 fix analysis unit tests. --- .../client/views/AnalysesToolBarImplTest.java | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java index d392d73c7..87e5ff071 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java @@ -19,6 +19,7 @@ import static org.mockito.Mockito.when; import org.iplantc.de.analysis.client.AnalysesView; +import org.iplantc.de.client.models.UserInfo; import org.iplantc.de.client.models.analysis.Analysis; import com.google.common.collect.Lists; @@ -53,6 +54,8 @@ public class AnalysesToolBarImplTest { @Mock AnalysesView.Appearance appearanceMock; @Mock PagingLoader> loaderMock; @Mock AnalysesView.Presenter presenterMock; + @Mock + UserInfo mockUserInfo; List currentSelectionMock; private AnalysesToolBarImpl uut; @@ -73,6 +76,7 @@ void mockMenuItems(AnalysesToolBarImpl uut){ uut.renameMI = renameMiMock; uut.updateCommentsMI = updateCommentsMiMock; uut.currentSelection = currentSelectionMock; + uut.userInfo = mockUserInfo; } @Test public void testOnSelectionChanged_ZeroSelected() { @@ -106,17 +110,18 @@ boolean canDeleteSelection(List selection) { final Analysis mockAnalysis = mock(Analysis.class); // Selected analysis' app is Enabled when(mockAnalysis.isAppDisabled()).thenReturn(false); - when(mockAnalysis.getUserName()).thenReturn(""); + when(mockAnalysis.getUserName()).thenReturn("user@iplantcollaborative.org"); + when(mockUserInfo.getFullUsername()).thenReturn("user@iplantcollaborative.org"); 
when(mockSelectionEvent.getSelection()).thenReturn(Lists.newArrayList(mockAnalysis)); uut.onSelectionChanged(mockSelectionEvent); verify(goToFolderMiMock).setEnabled(eq(true)); verify(viewParamsMiMock).setEnabled(eq(true)); verify(relaunchMock).setEnabled(eq(true)); - // verify(cancelMiMock).setEnabled(eq(true)); - // verify(deleteMiMock).setEnabled(eq(true)); - // verify(renameMiMock).setEnabled(eq(true)); - // verify(updateCommentsMiMock).setEnabled(eq(true)); + verify(cancelMiMock).setEnabled(eq(true)); + verify(deleteMiMock).setEnabled(eq(true)); + verify(renameMiMock).setEnabled(eq(true)); + verify(updateCommentsMiMock).setEnabled(eq(true)); } @Test public void testOnSelectionChanged_OneSelected_appDisabled() { @@ -136,16 +141,18 @@ boolean canDeleteSelection(List selection) { // Selected analysis' app is disabled when(mockAnalysis.isAppDisabled()).thenReturn(true); when(mockSelectionEvent.getSelection()).thenReturn(Lists.newArrayList(mockAnalysis)); - when(mockAnalysis.getUserName()).thenReturn(""); + when(mockAnalysis.getUserName()).thenReturn("user@iplantcollaborative.org"); + when(mockUserInfo.getFullUsername()).thenReturn("user@iplantcollaborative.org"); + uut.onSelectionChanged(mockSelectionEvent); verify(goToFolderMiMock).setEnabled(eq(true)); verify(viewParamsMiMock).setEnabled(eq(true)); verify(relaunchMock).setEnabled(eq(false)); - // verify(cancelMiMock).setEnabled(eq(true)); - // verify(deleteMiMock).setEnabled(eq(true)); - // verify(renameMiMock).setEnabled(eq(true)); - // verify(updateCommentsMiMock).setEnabled(eq(true)); + verify(cancelMiMock).setEnabled(eq(true)); + verify(deleteMiMock).setEnabled(eq(true)); + verify(renameMiMock).setEnabled(eq(true)); + verify(updateCommentsMiMock).setEnabled(eq(true)); } @Test public void testOnSelectionChanged_ManySelected() { @@ -165,16 +172,17 @@ boolean canDeleteSelection(List selection) { // Selected analysis' app is Enabled when(mockAnalysis.isAppDisabled()).thenReturn(false); when(mockSelectionEvent.getSelection()).thenReturn(Lists.newArrayList(mockAnalysis, mock(Analysis.class))); - when(mockAnalysis.getUserName()).thenReturn(""); + when(mockAnalysis.getUserName()).thenReturn("user@iplantcollaborative.org"); + when(mockUserInfo.getFullUsername()).thenReturn("user@iplantcollaborative.org"); uut.onSelectionChanged(mockSelectionEvent); verify(goToFolderMiMock).setEnabled(eq(false)); verify(viewParamsMiMock).setEnabled(eq(false)); verify(relaunchMock).setEnabled(eq(false)); - // verify(cancelMiMock).setEnabled(eq(true)); - // verify(deleteMiMock).setEnabled(eq(true)); - // verify(renameMiMock).setEnabled(eq(false)); - // verify(updateCommentsMiMock).setEnabled(eq(false)); + verify(cancelMiMock).setEnabled(eq(true)); + verify(deleteMiMock).setEnabled(eq(true)); + verify(renameMiMock).setEnabled(eq(false)); + verify(updateCommentsMiMock).setEnabled(eq(false)); } @Test public void testCanCancelSelection() { From 0da0d99582b031dcb285c2f24dc5fa816edf3ea4 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 1 Mar 2016 15:45:01 -0700 Subject: [PATCH 118/183] CORE-7152 refactor code and fix tests. 
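Swapping the static UserInfo.getInstance() call for an injected UserInfo field is what lets the unit tests stub the logged-in user. A minimal sketch of the ownership-gating pattern being tested, outside GWT (the nested interfaces are stand-ins for the DE types, not project code):

    import static org.junit.Assert.assertFalse;
    import static org.junit.Assert.assertTrue;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.Arrays;
    import java.util.List;

    import org.junit.Test;

    /** Illustration only: ownership gating with an injectable user lookup. */
    public class OwnershipGatingSketchTest {

        interface UserInfo { String getFullUsername(); } // stand-in for the DE singleton
        interface Analysis { String getUserName(); }     // stand-in for the model bean

        /** The rule the toolbar applies before enabling ownership-gated items. */
        static boolean isOwner(UserInfo userInfo, List<Analysis> selection) {
            for (Analysis a : selection) {
                if (!a.getUserName().equals(userInfo.getFullUsername())) {
                    return false;
                }
            }
            return true;
        }

        @Test
        public void gatedOnOwnership() {
            UserInfo userInfo = mock(UserInfo.class);
            when(userInfo.getFullUsername()).thenReturn("user@iplantcollaborative.org");

            Analysis mine = mock(Analysis.class);
            when(mine.getUserName()).thenReturn("user@iplantcollaborative.org");
            Analysis theirs = mock(Analysis.class);
            when(theirs.getUserName()).thenReturn("other@iplantcollaborative.org");

            assertTrue(isOwner(userInfo, Arrays.asList(mine)));
            assertFalse(isOwner(userInfo, Arrays.asList(mine, theirs)));
        }
    }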
--- .../sharing/AnalysisSharingPresenter.java | 10 ++++------ .../analysis/client/views/AnalysesToolBarImpl.java | 14 ++++++++------ .../de/client/sharing/SharingPermissionsPanel.java | 13 +++++++++++-- .../client/views/AnalysesToolBarImplTest.java | 14 ++++++++++++++ 4 files changed, 37 insertions(+), 14 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java index 262f7ba0e..6c4b6ac25 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java @@ -1,7 +1,5 @@ /** - * * @author sriram - * */ package org.iplantc.de.analysis.client.presenter.sharing; @@ -42,14 +40,14 @@ public AnalysisSharingPresenter(final AnalysisServiceFacade aService, final JsonUtil jsonUtil) { this.view = view; - view.setPresenter(this); this.jsonUtil = jsonUtil; this.collaboratorsUtil = collaboratorsUtil; this.selectedAnalysis = selectedAnalysis; - this.permissionsPanel = new SharingPermissionsPanel(this, - getSelectedResourcesAsMap(this.selectedAnalysis)); - permissionsPanel.setPermissionColumnVisibility(false); + this.permissionsPanel = + new SharingPermissionsPanel(this, getSelectedResourcesAsMap(this.selectedAnalysis)); + permissionsPanel.hidePermissionColumn(); permissionsPanel.setExplainPanelVisibility(false); + view.setPresenter(this); view.addShareWidget(permissionsPanel.asWidget()); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java index c7d953099..b4a64f8ac 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java @@ -44,7 +44,6 @@ import com.sencha.gxt.widget.core.client.button.TextButton; import com.sencha.gxt.widget.core.client.event.DialogHideEvent; import com.sencha.gxt.widget.core.client.event.SelectEvent; -import com.sencha.gxt.widget.core.client.info.Info; import com.sencha.gxt.widget.core.client.menu.Item; import com.sencha.gxt.widget.core.client.menu.MenuItem; import com.sencha.gxt.widget.core.client.selection.SelectionChangedEvent; @@ -83,7 +82,10 @@ interface AnalysesToolbarUiBinder extends UiBinder MenuItem shareCollabMI; @UiField MenuItem shareSupportMI; - @Inject AsyncProvider analysisParametersDialogAsyncProvider; + @Inject + AsyncProvider analysisParametersDialogAsyncProvider; + @Inject + UserInfo userInfo; List currentSelection; @@ -121,13 +123,13 @@ public void filterByParentAnalysisId(String analysisId) { @Override public void onSelectionChanged(SelectionChangedEvent event) { - GWT.log("user--->" + UserInfo.getInstance().getFullUsername()); + GWT.log("user--->" + userInfo.getFullUsername()); currentSelection = event.getSelection(); int size = currentSelection.size(); final boolean canCancelSelection = canCancelSelection(currentSelection); final boolean canDeleteSelection = canDeleteSelection(currentSelection); - boolean isOwner = canShare(currentSelection); + boolean isOwner = isOwner(currentSelection); boolean goToFolderEnabled, viewParamsEnabled, relaunchEnabled, cancelEnabled, deleteEnabled; boolean renameEnabled, updateCommentsEnabled, shareEnabled; @@ -181,9 +183,9 @@ public void 
onSelectionChanged(SelectionChangedEvent event) { updateCommentsMI.setEnabled(updateCommentsEnabled); } - private boolean canShare(List selection) { + private boolean isOwner(List selection) { for (Analysis a : selection) { - if (!(a.getUserName().equals(UserInfo.getInstance().getFullUsername()))) { + if (!(a.getUserName().equals(userInfo.getFullUsername()))) { return false; } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/sharing/SharingPermissionsPanel.java b/ui/de-lib/src/main/java/org/iplantc/de/client/sharing/SharingPermissionsPanel.java index 473e52815..6c6c37cbc 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/sharing/SharingPermissionsPanel.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/sharing/SharingPermissionsPanel.java @@ -325,15 +325,24 @@ public String getPath() { return new ColumnModel<>(configs); } - public void setPermissionColumnVisibility(boolean visible) { + public void hidePermissionColumn() { for(ColumnConfig cc: grid.getColumnModel().getColumns()) { if(cc.getHeader().asString().equals(appearance.permissionsColumnLabel())) { - cc.setHidden(visible); + cc.setHidden(true); return; } } } + public void showPermissionColumn() { + for(ColumnConfig cc: grid.getColumnModel().getColumns()) { + if(cc.getHeader().asString().equals(appearance.permissionsColumnLabel())) { + cc.setHidden(false); + return; + } + } + } + public void setExplainPanelVisibility(boolean visible) { explainPanel.setVisible(visible); } diff --git a/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java index 87e5ff071..5855366bb 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java @@ -28,6 +28,7 @@ import com.sencha.gxt.data.shared.loader.FilterPagingLoadConfig; import com.sencha.gxt.data.shared.loader.PagingLoadResult; import com.sencha.gxt.data.shared.loader.PagingLoader; +import com.sencha.gxt.widget.core.client.button.TextButton; import com.sencha.gxt.widget.core.client.menu.MenuItem; import com.sencha.gxt.widget.core.client.selection.SelectionChangedEvent; @@ -58,6 +59,13 @@ public class AnalysesToolBarImplTest { UserInfo mockUserInfo; List currentSelectionMock; + @Mock + TextButton share_menuMock; + @Mock + MenuItem shareCollabMIMock; + @Mock + MenuItem shareSupportMIMock; + private AnalysesToolBarImpl uut; @Before public void setUp() { @@ -76,6 +84,9 @@ void mockMenuItems(AnalysesToolBarImpl uut){ uut.renameMI = renameMiMock; uut.updateCommentsMI = updateCommentsMiMock; uut.currentSelection = currentSelectionMock; + uut.share_menu = share_menuMock; + uut.shareCollabMI = shareCollabMIMock; + uut.shareSupportMI = shareSupportMIMock; uut.userInfo = mockUserInfo; } @@ -90,6 +101,9 @@ void mockMenuItems(AnalysesToolBarImpl uut){ verify(deleteMiMock).setEnabled(eq(false)); verify(renameMiMock).setEnabled(eq(false)); verify(updateCommentsMiMock).setEnabled(eq(false)); + verify(share_menuMock).setEnabled(eq(false)); + verify(shareCollabMIMock).setEnabled(eq(false)); + verify(shareSupportMIMock).setEnabled(eq(false)); } From 4fb988bc2948fb6578022678eba5b32efa0c25a3 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 8 Mar 2016 16:11:46 -0700 Subject: [PATCH 119/183] CORE-7152 sharing models and refactor. 
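The new AutoBean interfaces pin the wire format through their @PropertyName annotations. A rough round-trip sketch of the JSON they imply (the class body, the payload, and the assumption that the factory declares a method for the request list are all illustrative; only the "sharing", "user", and "analyses" keys come from the interfaces below):

    // SharingModelSketch.java -- illustration only, not project code.
    import com.google.gwt.core.client.GWT;
    import com.google.web.bindery.autobean.shared.AutoBean;
    import com.google.web.bindery.autobean.shared.AutoBeanCodex;

    public class SharingModelSketch {
        /** Assumes AnalysisSharingAutoBeanFactory can create the request list type. */
        public static String firstUser() {
            AnalysisSharingAutoBeanFactory factory =
                    GWT.create(AnalysisSharingAutoBeanFactory.class);
            String payload = "{\"sharing\": [{\"user\": \"alice\", \"analyses\": []}]}";
            AutoBean<AnalysisSharingRequestList> bean =
                    AutoBeanCodex.decode(factory, AnalysisSharingRequestList.class, payload);
            // getAnalysisSharingRequestList() maps to the "sharing" key above.
            return bean.as().getAnalysisSharingRequestList().get(0).getUser(); // "alice"
        }
    }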
--- .../client/models/AnalysisFilter.java | 24 +++++ .../client/views/AnalysesToolBarImpl.java | 87 +++++++++++++++---- .../client/views/AnalysesToolBarImpl.ui.xml | 12 ++- .../analysis/sharing/AnalysisPermission.java | 20 +++++ .../AnalysisSharingAutoBeanFactory.java | 15 ++++ .../sharing/AnalysisSharingRequest.java | 22 +++++ .../sharing/AnalysisSharingRequestList.java | 17 ++++ .../sharing/AnalysisUnsharingRequest.java | 20 +++++ .../sharing/AnalysisUnsharingRequestList.java | 16 ++++ .../sharing/AnalysisUserPermissions.java | 18 ++++ .../sharing/AnalysisUserPermissionsList.java | 13 +++ .../analyses/AnalysesMessages.properties | 2 +- 12 files changed, 244 insertions(+), 22 deletions(-) create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/analysis/client/models/AnalysisFilter.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisPermission.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingAutoBeanFactory.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingRequest.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingRequestList.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUnsharingRequest.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUnsharingRequestList.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUserPermissions.java create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUserPermissionsList.java diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/models/AnalysisFilter.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/models/AnalysisFilter.java new file mode 100644 index 000000000..6aaf1c6b5 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/models/AnalysisFilter.java @@ -0,0 +1,24 @@ +package org.iplantc.de.analysis.client.models; + +/** + * Created by sriram on 3/4/16. 
+ */ +public enum AnalysisFilter { + + ALL("All"), SHARED_WITH_ME("Analyses shared with me"), MY_ANALYSES("Only my analyses"); + + private String filter; + + private AnalysisFilter(String label) { + this.filter = label; + } + + public String getFilterString() { + return toString(); + } + + @Override + public String toString() { + return filter; + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java index b4a64f8ac..bf9fa51ae 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java @@ -9,21 +9,24 @@ import org.iplantc.de.analysis.client.AnalysesView; import org.iplantc.de.analysis.client.AnalysisToolBarView; +import org.iplantc.de.analysis.client.models.AnalysisFilter; import org.iplantc.de.analysis.client.views.dialogs.AnalysisCommentsDialog; import org.iplantc.de.analysis.client.views.dialogs.AnalysisParametersDialog; import org.iplantc.de.analysis.client.views.widget.AnalysisSearchField; import org.iplantc.de.analysis.shared.AnalysisModule; import org.iplantc.de.client.models.UserInfo; import org.iplantc.de.client.models.analysis.Analysis; +import org.iplantc.de.client.models.analysis.AnalysisExecutionStatus; import org.iplantc.de.commons.client.ErrorHandler; import org.iplantc.de.commons.client.validators.DiskResourceNameValidator; import org.iplantc.de.commons.client.views.dialogs.IPlantPromptDialog; import com.google.common.base.Preconditions; -import com.google.common.base.Strings; import com.google.gwt.core.client.GWT; -import com.google.gwt.event.dom.client.KeyUpEvent; import com.google.gwt.event.logical.shared.SelectionEvent; +import com.google.gwt.event.logical.shared.SelectionHandler; +import com.google.gwt.event.logical.shared.ValueChangeEvent; +import com.google.gwt.event.logical.shared.ValueChangeHandler; import com.google.gwt.inject.client.AsyncProvider; import com.google.gwt.uibinder.client.UiBinder; import com.google.gwt.uibinder.client.UiFactory; @@ -35,6 +38,7 @@ import com.google.inject.Inject; import com.google.inject.assistedinject.Assisted; +import com.sencha.gxt.data.shared.StringLabelProvider; import com.sencha.gxt.data.shared.loader.FilterPagingLoadConfig; import com.sencha.gxt.data.shared.loader.PagingLoadResult; import com.sencha.gxt.data.shared.loader.PagingLoader; @@ -44,11 +48,13 @@ import com.sencha.gxt.widget.core.client.button.TextButton; import com.sencha.gxt.widget.core.client.event.DialogHideEvent; import com.sencha.gxt.widget.core.client.event.SelectEvent; +import com.sencha.gxt.widget.core.client.form.SimpleComboBox; import com.sencha.gxt.widget.core.client.menu.Item; import com.sencha.gxt.widget.core.client.menu.MenuItem; import com.sencha.gxt.widget.core.client.selection.SelectionChangedEvent; import com.sencha.gxt.widget.core.client.toolbar.ToolBar; +import java.util.Arrays; import java.util.List; /** @@ -72,7 +78,6 @@ interface AnalysesToolbarUiBinder extends UiBinder @UiField MenuItem renameMI; @UiField TextButton editTb; @UiField TextButton refreshTb; - @UiField TextButton showAllTb; @UiField AnalysisSearchField searchField; @UiField(provided = true) final AnalysesView.Appearance appearance; @@ -82,9 +87,14 @@ interface AnalysesToolbarUiBinder extends UiBinder MenuItem shareCollabMI; @UiField MenuItem shareSupportMI; + + @UiField(provided = true) + SimpleComboBox filterCombo; + 
@Inject AsyncProvider analysisParametersDialogAsyncProvider; - @Inject + + @Inject UserInfo userInfo; @@ -99,8 +109,40 @@ interface AnalysesToolbarUiBinder extends UiBinder this.appearance = appearance; this.presenter = presenter; this.loader = loader; + + filterCombo = new SimpleComboBox(new StringLabelProvider()); + filterCombo.add(Arrays.asList(AnalysisFilter.ALL, + AnalysisFilter.MY_ANALYSES, + AnalysisFilter.SHARED_WITH_ME)); AnalysesToolbarUiBinder uiBinder = GWT.create(AnalysesToolbarUiBinder.class); initWidget(uiBinder.createAndBindUi(this)); + filterCombo.setEditable(false); + filterCombo.setValue(AnalysisFilter.ALL); + filterCombo.addSelectionHandler(new SelectionHandler() { + @Override + public void onSelection(SelectionEvent event) { + onFilterChange(event.getSelectedItem()); + } + }); + filterCombo.addValueChangeHandler(new ValueChangeHandler() { + @Override + public void onValueChange(ValueChangeEvent event) { + onFilterChange(event.getValue()); + } + }); + } + + private void onFilterChange(AnalysisFilter af) { + switch (af) { + case ALL: + onShowAllSelected(); + break; + case SHARED_WITH_ME: + break; + + case MY_ANALYSES: + break; + } } @UiFactory @@ -111,13 +153,15 @@ AnalysisSearchField createSearchField() { @Override public void filterByAnalysisId(String analysisId, String name) { searchField.filterByAnalysisId(analysisId, name); - showAllTb.enable(); + //reset filter. Users need to set Filter to ALL to go back... + filterCombo.setValue(null); } @Override public void filterByParentAnalysisId(String analysisId) { searchField.filterByParentId(analysisId); - showAllTb.enable(); + //reset filter. Users need to set Filter to ALL to go back... + filterCombo.setValue(null); } @Override @@ -130,6 +174,7 @@ public void onSelectionChanged(SelectionChangedEvent event) { final boolean canCancelSelection = canCancelSelection(currentSelection); final boolean canDeleteSelection = canDeleteSelection(currentSelection); boolean isOwner = isOwner(currentSelection); + boolean isShare = isSharable(currentSelection); boolean goToFolderEnabled, viewParamsEnabled, relaunchEnabled, cancelEnabled, deleteEnabled; boolean renameEnabled, updateCommentsEnabled, shareEnabled; @@ -155,7 +200,7 @@ public void onSelectionChanged(SelectionChangedEvent event) { renameEnabled = isOwner; updateCommentsEnabled = isOwner; - shareEnabled = isOwner; + shareEnabled = isOwner && isShare; break; default: @@ -165,7 +210,7 @@ public void onSelectionChanged(SelectionChangedEvent event) { relaunchEnabled = false; cancelEnabled = canCancelSelection && isOwner; deleteEnabled = canDeleteSelection && isOwner; - shareEnabled = isOwner; + shareEnabled = isOwner && isShare; renameEnabled = false; updateCommentsEnabled = false; } @@ -193,6 +238,18 @@ private boolean isOwner(List selection) { return true; } + private boolean isSharable(List selection) { + for (Analysis a : selection) { + if (!(a.getStatus().equals(AnalysisExecutionStatus.COMPLETED.toString()) || a.getStatus() + .equals(AnalysisExecutionStatus.FAILED + .toString()))) { + return false; + } + + } + return true; + } + @Override protected void onEnsureDebugId(String baseID) { super.onEnsureDebugId(baseID); @@ -271,15 +328,15 @@ boolean canDeleteSelection(List selection) { } // - @UiHandler("searchField") +/* @UiHandler("searchField") void searchFieldKeyUp(KeyUpEvent event){ if (Strings.isNullOrEmpty(searchField.getCurrentValue())) { // disable show all since an empty search field would fire load all. 
- showAllTb.disable(); + showTb.disable(); } else { - showAllTb.enable(); + showTb.enable(); } - } + }*/ @UiHandler("cancelMI") void onCancelSelected(SelectionEvent event) { @@ -398,14 +455,10 @@ void onRefreshSelected(SelectEvent event) { presenter.onRefreshSelected(); } - @UiHandler("showAllTb") - void onShowAllSelected(SelectEvent event) { + void onShowAllSelected() { searchField.clear(); - showAllTb.setEnabled(false); presenter.onShowAllSelected(); } - // - @UiHandler("shareCollabMI") void onShareSelected(SelectionEvent event) { diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml index edd9773d7..2463d2206 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml @@ -3,6 +3,7 @@ xmlns:menu="urn:import:com.sencha.gxt.widget.core.client.menu" xmlns:toolbar="urn:import:com.sencha.gxt.widget.core.client.toolbar" xmlns:button="urn:import:com.sencha.gxt.widget.core.client.button" + xmlns:form="urn:import:com.sencha.gxt.widget.core.client.form" xmlns:search="urn:import:org.iplantc.de.analysis.client.views.widget"> - + + + + + + + resourceUserPermissionsList(); + + AutoBean resourceUserPermissions(); +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingRequest.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingRequest.java new file mode 100644 index 000000000..baf45dea6 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingRequest.java @@ -0,0 +1,22 @@ +package org.iplantc.de.client.models.analysis.sharing; + +import com.google.web.bindery.autobean.shared.AutoBean; + +import java.util.List; + +/** + * Created by sriram on 3/8/16. + */ +public interface AnalysisSharingRequest { + + String getUser(); + + @AutoBean.PropertyName("analyses") + List getAnalysisPermissions(); + + void setUser(String user); + + @AutoBean.PropertyName("analyses") + void setAnalysisPermissions(List appPerms); + +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingRequestList.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingRequestList.java new file mode 100644 index 000000000..b6e21c969 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingRequestList.java @@ -0,0 +1,17 @@ +package org.iplantc.de.client.models.analysis.sharing; + +import com.google.web.bindery.autobean.shared.AutoBean; + +import java.util.List; + +/** + * Created by sriram on 3/8/16. 
+ */ +public interface AnalysisSharingRequestList { + @AutoBean.PropertyName("sharing") + List getAnalysisSharingRequestList(); + + @AutoBean.PropertyName("sharing") + void setAnalysisSharingRequestList(List sharinglist); + +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUnsharingRequest.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUnsharingRequest.java new file mode 100644 index 000000000..59bd580ec --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUnsharingRequest.java @@ -0,0 +1,20 @@ +package org.iplantc.de.client.models.analysis.sharing; + +import com.google.web.bindery.autobean.shared.AutoBean; + +import java.util.List; + +/** + * Created by sriram on 3/8/16. + */ +public interface AnalysisUnsharingRequest { + void setUser(String user); + + @AutoBean.PropertyName("analyses") + void setAnalyses(List analyses); + + String getUser(); + + @AutoBean.PropertyName("analyses") + List getAnalyses(); +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUnsharingRequestList.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUnsharingRequestList.java new file mode 100644 index 000000000..691dd7478 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUnsharingRequestList.java @@ -0,0 +1,16 @@ +package org.iplantc.de.client.models.analysis.sharing; + +import com.google.web.bindery.autobean.shared.AutoBean; + +import java.util.List; + +/** + * Created by sriram on 3/8/16. + */ +public interface AnalysisUnsharingRequestList { + @AutoBean.PropertyName("unsharing") + List getAnalysisUnSharingRequestList(); + + @AutoBean.PropertyName("unsharing") + void setAnalysisUnSharingRequestList(List unsharinglist); +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUserPermissions.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUserPermissions.java new file mode 100644 index 000000000..d723bd1db --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUserPermissions.java @@ -0,0 +1,18 @@ +package org.iplantc.de.client.models.analysis.sharing; + +import org.iplantc.de.client.models.HasId; +import org.iplantc.de.client.models.sharing.UserPermission; + +import com.google.gwt.user.client.ui.HasName; +import com.google.web.bindery.autobean.shared.AutoBean; + +import java.util.List; + +/** + * Created by sriram on 3/8/16. + */ +public interface AnalysisUserPermissions extends HasId, HasName{ + + @AutoBean.PropertyName("permissions") + List getPermissions(); +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUserPermissionsList.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUserPermissionsList.java new file mode 100644 index 000000000..ba2c5d030 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisUserPermissionsList.java @@ -0,0 +1,13 @@ +package org.iplantc.de.client.models.analysis.sharing; + +import com.google.web.bindery.autobean.shared.AutoBean; + +import java.util.List; + +/** + * Created by sriram on 3/8/16. 
+ */ +public interface AnalysisUserPermissionsList { + @AutoBean.PropertyName("analyses") + List getResourceUserPermissionsList(); +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.properties index 93ba0ae0c..3a1c73488 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/analyses/AnalysesMessages.properties @@ -1,7 +1,7 @@ gridEmptyText = No items to display viewParameters = Viewing parameters for {0} selectionCount = {0} item(s) -searchFieldEmptyText = Filter by Name or App +searchFieldEmptyText = Search by Name or App goToOutputFolder = Go to output folder viewParamLbl = View Parameters viewAnalysisStepInfo = View Analysis Info From da2112882ee2ca254a2fcd65c0953b4778b2921d Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 8 Mar 2016 16:12:45 -0700 Subject: [PATCH 120/183] CORE-7152 fix tests --- .../client/views/AnalysesToolBarImplTest.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java index 5855366bb..8bf4092e3 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java @@ -127,6 +127,7 @@ boolean canDeleteSelection(List selection) { when(mockAnalysis.getUserName()).thenReturn("user@iplantcollaborative.org"); when(mockUserInfo.getFullUsername()).thenReturn("user@iplantcollaborative.org"); when(mockSelectionEvent.getSelection()).thenReturn(Lists.newArrayList(mockAnalysis)); + when(mockAnalysis.getStatus()).thenReturn(COMPLETED.toString()); uut.onSelectionChanged(mockSelectionEvent); verify(goToFolderMiMock).setEnabled(eq(true)); @@ -157,7 +158,7 @@ boolean canDeleteSelection(List selection) { when(mockSelectionEvent.getSelection()).thenReturn(Lists.newArrayList(mockAnalysis)); when(mockAnalysis.getUserName()).thenReturn("user@iplantcollaborative.org"); when(mockUserInfo.getFullUsername()).thenReturn("user@iplantcollaborative.org"); - + when(mockAnalysis.getStatus()).thenReturn(COMPLETED.toString()); uut.onSelectionChanged(mockSelectionEvent); verify(goToFolderMiMock).setEnabled(eq(true)); @@ -183,11 +184,19 @@ boolean canDeleteSelection(List selection) { }; mockMenuItems(uut); final Analysis mockAnalysis = mock(Analysis.class); + final Analysis mockAnalysis2 = mock(Analysis.class); // Selected analysis' app is Enabled when(mockAnalysis.isAppDisabled()).thenReturn(false); - when(mockSelectionEvent.getSelection()).thenReturn(Lists.newArrayList(mockAnalysis, mock(Analysis.class))); + when(mockSelectionEvent.getSelection()).thenReturn(Lists.newArrayList(mockAnalysis, mockAnalysis2)); + when(mockAnalysis.getUserName()).thenReturn("user@iplantcollaborative.org"); when(mockUserInfo.getFullUsername()).thenReturn("user@iplantcollaborative.org"); + when(mockAnalysis.getStatus()).thenReturn(COMPLETED.toString()); + + when(mockAnalysis2.getUserName()).thenReturn("user@iplantcollaborative.org"); + when(mockUserInfo.getFullUsername()).thenReturn("user@iplantcollaborative.org"); + when(mockAnalysis2.getStatus()).thenReturn(FAILED.toString()); + 
uut.onSelectionChanged(mockSelectionEvent); verify(goToFolderMiMock).setEnabled(eq(false)); From 04366064b026ac6a983cdacb3ff14a4c34c56cd2 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 9 Mar 2016 13:21:04 -0700 Subject: [PATCH 121/183] Add :jvm-opts mirroring other services to dewey, so 'lein run' can pick up custom logging configs if desired. --- services/dewey/project.clj | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/services/dewey/project.clj b/services/dewey/project.clj index ed2d1d6cc..8e3b79a65 100644 --- a/services/dewey/project.clj +++ b/services/dewey/project.clj @@ -40,4 +40,5 @@ :resource-paths [] :profiles {:dev {:dependencies [[midje "1.6.3"]] :resource-paths ["dev-resource"]} - :uberjar {:aot :all}}) + :uberjar {:aot :all}} + :jvm-opts ["-Dlogback.configurationFile=/etc/iplant/de/logging/dewey-logging.xml"]) From 87674c24efe3aaadab45821eff6a1fd270b1afd4 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 9 Mar 2016 15:18:59 -0700 Subject: [PATCH 122/183] Force streadway/amqp version down to 56eb08f0d1f9d90936ab1ab4e20ee9b9bf45ce26 which is the last successful build. Alternatives may be advisable in the future. --- services/templeton/vendor/manifest | 4 +- .../src/github.com/streadway/amqp/README.md | 2 +- .../streadway/amqp/_examples/pubsub/pubsub.go | 2 +- .../src/github.com/streadway/amqp/channel.go | 11 +--- .../github.com/streadway/amqp/client_test.go | 65 +++---------------- 5 files changed, 13 insertions(+), 71 deletions(-) diff --git a/services/templeton/vendor/manifest b/services/templeton/vendor/manifest index b04f79d82..85495b029 100644 --- a/services/templeton/vendor/manifest +++ b/services/templeton/vendor/manifest @@ -16,7 +16,7 @@ { "importpath": "github.com/streadway/amqp", "repository": "https://github.com/streadway/amqp", - "revision": "b4f3ceab0337f013208d31348b578d83c0064744", + "revision": "56eb08f0d1f9d90936ab1ab4e20ee9b9bf45ce26", "branch": "master" }, { @@ -32,4 +32,4 @@ "branch": "v2" } ] -} \ No newline at end of file +} diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/README.md b/services/templeton/vendor/src/github.com/streadway/amqp/README.md index 7869af81e..c4291fb68 100644 --- a/services/templeton/vendor/src/github.com/streadway/amqp/README.md +++ b/services/templeton/vendor/src/github.com/streadway/amqp/README.md @@ -14,7 +14,7 @@ enhancements. # Goals -Provide a functional interface that closely represents the AMQP 0.9.1 model +Provide an functional interface that closely represents the AMQP 0.9.1 model targeted to RabbitMQ as a server. This includes the minimum necessary to interact the semantics of the protocol. 
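The channel.go hunk below restores the original framing arithmetic, dropping the guard that substituted the body length when the negotiated frame size was zero. On its own, that loop splits a published body into payloads of at most FrameSize - frameHeaderSize bytes. A standalone sketch of the arithmetic (the header size of 8 is an assumption for illustration, not quoted from the library):

    // framing_sketch.go -- standalone illustration, not library code.
    package main

    import "fmt"

    // Assumed layout: frame type + channel + payload length + frame-end octet.
    const frameHeaderSize = 8

    func chunks(body []byte, frameSize int) [][]byte {
        // Max payload per body frame. Note a non-positive size misbehaves,
        // which is the zero-frame-size case the reverted guard handled.
        size := frameSize - frameHeaderSize
        var out [][]byte
        for i, j := 0, size; i < len(body); i, j = j, j+size {
            if j > len(body) {
                j = len(body)
            }
            out = append(out, body[i:j])
        }
        return out
    }

    func main() {
        // A 25-byte body under an 18-byte frame limit yields three frames.
        fmt.Println(len(chunks(make([]byte, 25), 18))) // 3
    }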
diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/_examples/pubsub/pubsub.go b/services/templeton/vendor/src/github.com/streadway/amqp/_examples/pubsub/pubsub.go index 29b4a53a3..109af2406 100644 --- a/services/templeton/vendor/src/github.com/streadway/amqp/_examples/pubsub/pubsub.go +++ b/services/templeton/vendor/src/github.com/streadway/amqp/_examples/pubsub/pubsub.go @@ -91,7 +91,7 @@ func publish(sessions chan chan session, messages <-chan message) { running bool reading = messages pending = make(chan message, 1) - confirm = make(chan amqp.Confirmation, 1) + confirm = make(amqp.Confirmation, 1) ) for session := range sessions { diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/channel.go b/services/templeton/vendor/src/github.com/streadway/amqp/channel.go index 8976fa905..9cf93b4d2 100644 --- a/services/templeton/vendor/src/github.com/streadway/amqp/channel.go +++ b/services/templeton/vendor/src/github.com/streadway/amqp/channel.go @@ -197,15 +197,7 @@ func (me *Channel) sendOpen(msg message) (err error) { if content, ok := msg.(messageWithContent); ok { props, body := content.getContent() class, _ := content.id() - - // catch client max frame size==0 and server max frame size==0 - // set size to length of what we're trying to publish - var size int - if me.connection.Config.FrameSize > 0 { - size = me.connection.Config.FrameSize - frameHeaderSize - } else { - size = len(body) - } + size := me.connection.Config.FrameSize - frameHeaderSize if err = me.connection.send(&methodFrame{ ChannelId: me.id, @@ -223,7 +215,6 @@ func (me *Channel) sendOpen(msg message) (err error) { return } - // chunk body into size (max frame size - frame header size) for i, j := 0, size; i < len(body); i, j = j, j+size { if j > len(body) { j = len(body) diff --git a/services/templeton/vendor/src/github.com/streadway/amqp/client_test.go b/services/templeton/vendor/src/github.com/streadway/amqp/client_test.go index 23acc9744..2df12dae9 100644 --- a/services/templeton/vendor/src/github.com/streadway/amqp/client_test.go +++ b/services/templeton/vendor/src/github.com/streadway/amqp/client_test.go @@ -327,7 +327,6 @@ func TestConfirmMultipleOrdersDeliveryTags(t *testing.T) { // Single tag, plus multiple, should produce // 2, 1, 3, 4 srv.send(1, &basicAck{DeliveryTag: 2}) - srv.send(1, &basicAck{DeliveryTag: 1}) srv.send(1, &basicAck{DeliveryTag: 4, Multiple: true}) srv.recv(1, &basicPublish{}) @@ -356,12 +355,10 @@ func TestConfirmMultipleOrdersDeliveryTags(t *testing.T) { ch.Confirm(false) - go func() { - ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 1")}) - ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 2")}) - ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 3")}) - ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 4")}) - }() + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 1")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 2")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 3")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 4")}) // received out of order, consumed in order for i, tag := range []uint64{1, 2, 3, 4} { @@ -370,12 +367,10 @@ func TestConfirmMultipleOrdersDeliveryTags(t *testing.T) { } } - go func() { - ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 5")}) - ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 6")}) - ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 7")}) - ch.Publish("", 
"q", false, false, Publishing{Body: []byte("pub 8")}) - }() + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 5")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 6")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 7")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 8")}) for i, tag := range []uint64{5, 6, 7, 8} { if ack := <-confirm; tag != ack.DeliveryTag { @@ -527,50 +522,6 @@ func TestPublishBodySliceIssue74(t *testing.T) { <-done } -// Should not panic when server and client have frame_size of 0 -func TestPublishZeroFrameSizeIssue161(t *testing.T) { - rwc, srv := newSession(t) - defer rwc.Close() - - const frameSize = 0 - const publishings = 1 - done := make(chan bool) - - go func() { - srv.connectionOpen() - srv.channelOpen(1) - - for i := 0; i < publishings; i++ { - srv.recv(1, &basicPublish{}) - } - - done <- true - }() - - cfg := defaultConfig() - cfg.FrameSize = frameSize - - c, err := Open(rwc, cfg) - - // override the tuned framesize with a hard 0, as would happen when rabbit is configured with 0 - c.Config.FrameSize = frameSize - - if err != nil { - t.Fatalf("could not create connection: %v (%s)", c, err) - } - - ch, err := c.Channel() - if err != nil { - t.Fatalf("could not open channel: %v (%s)", ch, err) - } - - for i := 0; i < publishings; i++ { - go ch.Publish("", "q", false, false, Publishing{Body: []byte("anything")}) - } - - <-done -} - func TestPublishAndShutdownDeadlockIssue84(t *testing.T) { rwc, srv := newSession(t) defer rwc.Close() From 00adc08cfde5b4e74d582659a288bbee58f3e3d4 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 10 Mar 2016 10:21:28 -0700 Subject: [PATCH 123/183] Coalesce attribute, value, and unit to blank string to properly handle nulls. --- services/templeton/src/templeton/database/database.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/services/templeton/src/templeton/database/database.go b/services/templeton/src/templeton/database/database.go index 4ce2378fb..e17792b02 100644 --- a/services/templeton/src/templeton/database/database.go +++ b/services/templeton/src/templeton/database/database.go @@ -66,9 +66,9 @@ func avuRecordFromRow(row *sql.Rows) (*model.AVURecord, error) { const _selectAVU = ` SELECT cast(id as varchar), - attribute, - value, - unit, + coalesce(attribute, ''), + coalesce(value, ''), + coalesce(unit, ''), cast(target_id as varchar), cast(target_type as varchar), created_by, From 8c1ba521af28b44930aff2ca4c0553406b20ff2f Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 10 Mar 2016 10:43:07 -0700 Subject: [PATCH 124/183] Bump version of langohr in info-typer. Our other clojure+AMQP projects use versions older than 3.0.0 and need code changes, but not here. This primarily pulls in updates to dependencies. 
--- services/info-typer/project.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/info-typer/project.clj b/services/info-typer/project.clj index afe3f1f96..a9b6bef61 100644 --- a/services/info-typer/project.clj +++ b/services/info-typer/project.clj @@ -15,7 +15,7 @@ :manifest {"Git-Ref" ~(git-ref)} :uberjar-name "info-typer-standalone.jar" :dependencies [[org.clojure/clojure "1.7.0"] - [com.novemberain/langohr "3.1.0"] + [com.novemberain/langohr "3.5.1"] [me.raynes/fs "1.4.6"] [org.iplantc/clj-jargon "5.2.5.0" :exclusions [[org.slf4j/slf4j-log4j12] From c63cfb8394f30ba03c78cc9a42f49e9648e88de9 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 10 Mar 2016 11:01:23 -0700 Subject: [PATCH 125/183] Update dewey's langohr to 3.5.1 (plus requisite code changes) --- services/dewey/project.clj | 2 +- services/dewey/src/dewey/amq.clj | 30 +++++++++++++++--------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/services/dewey/project.clj b/services/dewey/project.clj index 8e3b79a65..6944ef51e 100644 --- a/services/dewey/project.clj +++ b/services/dewey/project.clj @@ -25,7 +25,7 @@ [com.fasterxml.jackson.core/jackson-databind] [com.fasterxml.jackson.core/jackson-core]]] [clojurewerkz/elastisch "2.0.0"] - [com.novemberain/langohr "2.11.0"] + [com.novemberain/langohr "3.5.1"] [liberator "0.11.1"] [compojure "1.1.8"] [ring "1.4.0"] diff --git a/services/dewey/src/dewey/amq.clj b/services/dewey/src/dewey/amq.clj index 82eb68c8f..330ffe0a9 100644 --- a/services/dewey/src/dewey/amq.clj +++ b/services/dewey/src/dewey/amq.clj @@ -28,22 +28,22 @@ [connection queue exchange-name exchange-durable exchange-auto-delete qos topics delivery-fn] (let [channel (lch/open connection) consumer (lc/create-default channel - :handle-consume-ok-fn (fn [_] (log/info "Registered with AMQP broker")) - :handle-delivery-fn delivery-fn - :handle-cancel-fn (fn [_] (log/info "AMQP broker registration canceled") - (Thread/sleep 1000) - (consume connection - queue - exchange-name - exchange-durable - exchange-auto-delete - topics - delivery-fn)))] + {:handle-consume-ok-fn (fn [_] (log/info "Registered with AMQP broker")) + :handle-delivery-fn delivery-fn + :handle-cancel-fn (fn [_] (log/info "AMQP broker registration canceled") + (Thread/sleep 1000) + (consume connection + queue + exchange-name + exchange-durable + exchange-auto-delete + topics + delivery-fn))})] (lb/qos channel qos) - (le/topic channel exchange-name :durable exchange-durable :auto-delete exchange-auto-delete) - (lq/declare channel queue :durable true :auto-delete false :exclusive false) - (doseq [topic topics] (lq/bind channel queue exchange-name :routing-key topic)) - (lb/consume channel queue consumer :auto-ack false))) + (le/topic channel exchange-name {:durable exchange-durable :auto-delete exchange-auto-delete}) + (lq/declare channel queue {:durable true :auto-delete false :exclusive false}) + (doseq [topic topics] (lq/bind channel queue exchange-name {:routing-key topic})) + (lb/consume channel queue consumer {:auto-ack false}))) (defn attach-to-exchange From 53f4331ee5ec5d7b438af962c357d3f6882e9130 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 10 Mar 2016 11:07:32 -0700 Subject: [PATCH 126/183] Update Infosquito's langohr to 3.5.1 (plus requisite code changes) --- services/Infosquito/project.clj | 2 +- services/Infosquito/src/infosquito/messages.clj | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/services/Infosquito/project.clj b/services/Infosquito/project.clj index 
2c8acfeff..b396e20ae 100644 --- a/services/Infosquito/project.clj +++ b/services/Infosquito/project.clj @@ -26,7 +26,7 @@ [com.fasterxml.jackson.core/jackson-databind] [com.fasterxml.jackson.core/jackson-core]]] [clojurewerkz/elastisch "2.0.0"] - [com.novemberain/langohr "2.11.0"] + [com.novemberain/langohr "3.5.1"] [slingshot "0.10.3"] [me.raynes/fs "1.4.6"] [org.iplantc/clojure-commons "5.2.5.0"] diff --git a/services/Infosquito/src/infosquito/messages.clj b/services/Infosquito/src/infosquito/messages.clj index 3bfb33920..30c459c4c 100644 --- a/services/Infosquito/src/infosquito/messages.clj +++ b/services/Infosquito/src/infosquito/messages.clj @@ -48,11 +48,11 @@ (defn- declare-queue [ch exchange queue-name] (lq/declare ch queue-name - :durable true - :auto-delete false - :exclusive false) + {:durable true + :auto-delete false + :exclusive false}) (doseq [key ["index.all" "index.data"]] - (lq/bind ch queue-name exchange :routing-key key))) + (lq/bind ch queue-name exchange {:routing-key key}))) (defn- reindex-handler [props ch {:keys [delivery-tag]} _] @@ -70,8 +70,8 @@ (let [exchange (cfg/get-amqp-exchange-name props) queue-name (cfg/get-amqp-reindex-queue props)] (le/direct ch exchange - :durable (cfg/amqp-exchange-durable? props) - :auto-delete (cfg/amqp-exchange-auto-delete? props)) + {:durable (cfg/amqp-exchange-durable? props) + :auto-delete (cfg/amqp-exchange-auto-delete? props)}) (declare-queue ch exchange queue-name) (lc/blocking-subscribe ch queue-name (partial reindex-handler props)))) From 31d9e95a3a36ab09b6d121d48268089bd1dec685 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 10 Mar 2016 12:30:34 -0700 Subject: [PATCH 127/183] Update monkey's langohr to 3.5.1 (plus requisite code changes) --- services/monkey/project.clj | 2 +- services/monkey/src/monkey/messenger.clj | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/services/monkey/project.clj b/services/monkey/project.clj index 9303a25be..d1cfd63f1 100644 --- a/services/monkey/project.clj +++ b/services/monkey/project.clj @@ -21,7 +21,7 @@ [postgresql "9.1-901-1.jdbc4"] [org.clojure/java.jdbc "0.3.5"] [clojurewerkz/elastisch "2.0.0"] - [com.novemberain/langohr "2.11.0"] + [com.novemberain/langohr "3.5.1"] [me.raynes/fs "1.4.6"] [slingshot "0.10.3"] [org.iplantc/clojure-commons "5.2.5.0"] diff --git a/services/monkey/src/monkey/messenger.clj b/services/monkey/src/monkey/messenger.clj index 7b0d91630..2091bc566 100644 --- a/services/monkey/src/monkey/messenger.clj +++ b/services/monkey/src/monkey/messenger.clj @@ -42,11 +42,11 @@ (let [exchange (props/amqp-exchange-name props) queue (props/amqp-queue props)] (exchange/direct ch exchange - :durable (props/amqp-exchange-durable? props) - :auto-delete (props/amqp-exchange-auto-delete? props)) - (queue/declare ch queue :durable true) + {:durable (props/amqp-exchange-durable? props) + :auto-delete (props/amqp-exchange-auto-delete? props)}) + (queue/declare ch queue {:durable true}) (doseq [key ["index.all" "index.tags"]] - (queue/bind ch queue exchange :routing-key key)) + (queue/bind ch queue exchange {:routing-key key})) queue)) From 94f44e127ef67ba1e2600b86c67637540a6e72c9 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 10 Mar 2016 14:09:00 -0700 Subject: [PATCH 128/183] Modernize dewey's config to use defprop etc. 
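Each defprop-* form below expands into a zero-argument accessor backed by the shared props ref and registers itself with the configs/config-valid validation machinery. Startup use then looks roughly like this sketch (the properties-file path is illustrative):

    (require '[dewey.config :as cfg])

    ;; Load and validate once, then read settings through the generated fns.
    (cfg/load-config-from-file "/etc/iplant/de/dewey.properties")
    (cfg/amqp-host)   ;=> the value of dewey.amqp.host, as a string
    (cfg/listen-port) ;=> the value of dewey.status.listen-port, as an int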
--- services/dewey/src/dewey/config.clj | 115 ++++++++++++++++++++++++++++ services/dewey/src/dewey/core.clj | 81 ++++++++------------ 2 files changed, 149 insertions(+), 47 deletions(-) create mode 100644 services/dewey/src/dewey/config.clj diff --git a/services/dewey/src/dewey/config.clj b/services/dewey/src/dewey/config.clj new file mode 100644 index 000000000..9ec85a8f8 --- /dev/null +++ b/services/dewey/src/dewey/config.clj @@ -0,0 +1,115 @@ +(ns dewey.config + (:use [slingshot.slingshot :only [throw+]]) + (:require [clojure-commons.config :as cc] + [clojure-commons.error-codes :as ce])) + +(def ^:private props (ref nil)) +(def ^:private config-valid (ref true)) +(def ^:private configs (ref [])) + +(cc/defprop-str environment-name + "The name of the deployment environment this is part of." + [props config-valid configs] + "dewey.environment-name") + +(cc/defprop-str amqp-host + "The hostname for the AMQP server" + [props config-valid configs] + "dewey.amqp.host") + +(cc/defprop-int amqp-port + "The port number for the AMQP server" + [props config-valid configs] + "dewey.amqp.port") + +(cc/defprop-str amqp-user + "The username for the AMQP server" + [props config-valid configs] + "dewey.amqp.user") + +(cc/defprop-str amqp-pass + "The password for the AMQP user" + [props config-valid configs] + "dewey.amqp.password") + +(cc/defprop-str amqp-exchange + "The exchange name for the AMQP server" + [props config-valid configs] + "dewey.amqp.exchange.name") + +(cc/defprop-boolean amqp-exchange-durable + "Whether the AMQP exchange is durable" + [props config-valid configs] + "dewey.amqp.exchange.durable") + +(cc/defprop-boolean amqp-exchange-autodelete + "Whether the AMQP exchange is auto-delete" + [props config-valid configs] + "dewey.amqp.exchange.auto-delete") + +(cc/defprop-int amqp-qos + "How many messages to prefetch from the AMQP queue." + [props config-valid configs] + "dewey.amqp.qos") + +(cc/defprop-str es-host + "The hostname for the Elasticsearch server" + [props config-valid configs] + "dewey.es.host") + +(cc/defprop-int es-port + "The port number for the Elasticsearch server" + [props config-valid configs] + "dewey.es.port") + +(cc/defprop-str irods-host + "The hostname for the iRODS server" + [props config-valid configs] + "dewey.irods.host") + +(cc/defprop-int irods-port + "The port number for the iRODS server" + [props config-valid configs] + "dewey.irods.port") + +(cc/defprop-str irods-zone + "The zone name for the iRODS server" + [props config-valid configs] + "dewey.irods.zone") + +(cc/defprop-str irods-user + "The username for the iRODS server" + [props config-valid configs] + "dewey.irods.user") + +(cc/defprop-str irods-pass + "The password for the iRODS user" + [props config-valid configs] + "dewey.irods.password") + +(cc/defprop-optstr irods-default-resource + "The default resource to use with the iRODS server. Probably blank." + [props config-valid configs] + "dewey.irods.default-resource" + "") + +(cc/defprop-str irods-home + "The base home directory for the iRODS server." + [props config-valid configs] + "dewey.irods.home") + +(cc/defprop-int listen-port + "The port number to listen on for status requests." 
+ [props config-valid configs] + "dewey.status.listen-port") + +(defn- validate-config + [] + (when-not (cc/validate-config configs config-valid) + (throw+ {:error_code ce/ERR_CONFIG_INVALID}))) + +(defn load-config-from-file + [cfg-path] + (cc/load-config-from-file cfg-path props) + (cc/log-config props :filters [#"(irods|amqp)\.(user|pass)"]) + (validate-config)) diff --git a/services/dewey/src/dewey/core.clj b/services/dewey/src/dewey/core.clj index 8d637dd71..284593db3 100644 --- a/services/dewey/src/dewey/core.clj +++ b/services/dewey/src/dewey/core.clj @@ -9,6 +9,7 @@ [dewey.amq :as amq] [dewey.curation :as curation] [dewey.status :as status] + [dewey.config :as cfg] [common-cli.core :as ccli] [me.raynes.fs :as fs] [service-logging.thread-context :as tc]) @@ -18,8 +19,8 @@ (defn- init-es "Establishes a connection to elasticsearch" - [props] - (let [url (URL. "http" (get props "dewey.es.host") (Integer. (get props "dewey.es.port")) "") + [] + (let [url (URL. "http" (cfg/es-host) (cfg/es-port) "") conn (try (es/connect(str url)) (catch Exception e @@ -32,72 +33,55 @@ (do (log/info "Failed to find elasticsearch. Retrying...") (Thread/sleep 1000) - (recur props))))) + (recur))))) (defn- init-irods - [props] - (irods/init (get props "dewey.irods.host") - (get props "dewey.irods.port") - (get props "dewey.irods.user") - (get props "dewey.irods.password") - (get props "dewey.irods.home") - (get props "dewey.irods.zone") - (get props "dewey.irods.default-resource"))) + [] + (irods/init (cfg/irods-host) + (cfg/irods-port) + (cfg/irods-user) + (cfg/irods-pass) + (cfg/irods-home) + (cfg/irods-zone) + (cfg/irods-default-resource))) (defn- listen - [props irods-cfg es] + [irods-cfg es] (let [attached? (try - (amq/attach-to-exchange (get props "dewey.amqp.host") - (Integer. (get props "dewey.amqp.port")) - (get props "dewey.amqp.user") - (get props "dewey.amqp.password") - (str "indexing." (get props "dewey.environment-name")) - (get props "dewey.amqp.exchange.name") - (Boolean. (get props "dewey.amqp.exchange.durable")) - (Boolean. (get props "dewey.amqp.exchange.auto-delete")) - (Integer. (get props "dewey.amqp.qos")) + (amq/attach-to-exchange (cfg/amqp-host) + (cfg/amqp-port) + (cfg/amqp-user) + (cfg/amqp-pass) + (str "indexing." (cfg/environment-name)) + (cfg/amqp-exchange) + (cfg/amqp-exchange-durable) + (cfg/amqp-exchange-autodelete) + (cfg/amqp-qos) (partial curation/consume-msg irods-cfg es) "data-object.#" "collection.#") (log/info "Attached to the AMQP broker.") true (catch Exception e - (log/debug (str e)) - (log/info "Failed to attach to the AMQP broker. Retrying...") + (log/info e "Failed to attach to the AMQP broker. Retrying...") false))] (when-not attached? (Thread/sleep 1000) - (recur props irods-cfg es)))) + (recur irods-cfg es)))) (defn- listen-for-status - [props] + [] (.start (Thread. 
- (partial status/start-jetty (Integer/parseInt (get props "dewey.status.listen-port")))))) - - -(defn- update-props - [load-props props] - (let [props-ref (ref props)] - (try+ - (load-props props-ref) - (catch Object _ - (log/error "Failed to load configuration parameters."))) - (when (.isEmpty @props-ref) - (throw+ {:type :cfg-problem :msg "Don't have any configuration parameters."})) - (when-not (= props @props-ref) - (config/log-config props-ref)) - @props-ref)) - + (partial status/start-jetty (cfg/listen-port))))) (defn- run - [props-loader] - (let [props (update-props props-loader (Properties.))] - (listen-for-status props) - (listen props (init-irods props) (init-es props)))) + [] + (listen-for-status) + (listen (init-irods) (init-es))) (def svc-info @@ -122,7 +106,10 @@ (try+ (let [{:keys [options arguments errors summary]} (ccli/handle-args svc-info args cli-options)] (when-not (fs/exists? (:config options)) - (ccli/exit 1 (str "The config file does not exist."))) - (run (partial config/load-config-from-file (:config options)))) + (ccli/exit 1 "The config file does not exist.")) + (when-not (fs/readable? (:config options)) + (ccli/exit 1 "The config file is not readable.")) + (cfg/load-config-from-file (:config options)) + (run)) (catch Object _ (log/error (:throwable &throw-context) "UNEXPECTED ERROR - EXITING"))))) From 178a8c9dea9f417f5898f924b58270936ee2c705 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 9 Mar 2016 18:01:34 -0700 Subject: [PATCH 129/183] CORE-7558: added the GET /data/:data-id/permissions endpoint to data-info --- .../data-info/src/data_info/routes/data.clj | 13 +++++++- .../src/data_info/routes/domain/common.clj | 1 + .../domain/{users.clj => permissions.clj} | 10 +++--- .../src/data_info/routes/domain/stats.clj | 1 - .../data-info/src/data_info/routes/users.clj | 2 +- .../src/data_info/services/permissions.clj | 31 +++++++++++++++++++ .../src/data_info/services/users.clj | 12 ++----- 7 files changed, 53 insertions(+), 17 deletions(-) rename services/data-info/src/data_info/routes/domain/{users.clj => permissions.clj} (63%) create mode 100644 services/data-info/src/data_info/services/permissions.clj diff --git a/services/data-info/src/data_info/routes/data.clj b/services/data-info/src/data_info/routes/data.clj index e5890674d..685dd57bf 100644 --- a/services/data-info/src/data_info/routes/data.clj +++ b/services/data-info/src/data_info/routes/data.clj @@ -2,6 +2,7 @@ (:use [common-swagger-api.schema] [data-info.routes.domain.common] [data-info.routes.domain.data] + [data-info.routes.domain.permissions] [data-info.routes.domain.stats]) (:require [data-info.services.create :as create] [data-info.services.metadata :as meta] @@ -9,6 +10,7 @@ [data-info.services.write :as write] [data-info.services.page-file :as page-file] [data-info.services.page-tabular :as page-tabular] + [data-info.services.permissions :as perms] [data-info.util.config :as cfg] [clojure-commons.error-codes :as ce] [data-info.util.service :as svc] @@ -130,4 +132,13 @@ with characters in a runtime-configurable parameter. 
Currently, this parameter l "ERR_INVALID_JSON, ERR_EXISTS, ERR_DOES_NOT_EXIST, ERR_NOT_READABLE," "ERR_NOT_WRITEABLE, ERR_NOT_A_USER, ERR_BAD_PATH_LENGTH, ERR_BAD_DIRNAME_LENGTH," "ERR_BAD_BASENAME_LENGTH, ERR_TOO_MANY_RESULTS")) - (svc/trap uri meta/do-metadata-save data-id params body))))) + (svc/trap uri meta/do-metadata-save data-id params body)) + + (GET* "/permissions" [:as {uri :uri}] + :query [params StandardUserQueryParams] + :return DataItemPermissionsResponse + :summary "Lists Data Item Permissions" + :description (str + "Lists permissions for a data item." + (get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER")) + (svc/trap uri perms/list-permissions params data-id))))) diff --git a/services/data-info/src/data_info/routes/domain/common.clj b/services/data-info/src/data_info/routes/domain/common.clj index 8c5cd555a..b16d5f98a 100644 --- a/services/data-info/src/data_info/routes/domain/common.clj +++ b/services/data-info/src/data_info/routes/domain/common.clj @@ -25,3 +25,4 @@ (def ValidInfoTypesEnum (apply s/enum (hm/supported-formats))) (def ValidInfoTypesEnumPlusBlank (apply s/enum (conj (hm/supported-formats) ""))) +(def PermissionEnum (s/enum :read :write :own)) diff --git a/services/data-info/src/data_info/routes/domain/users.clj b/services/data-info/src/data_info/routes/domain/permissions.clj similarity index 63% rename from services/data-info/src/data_info/routes/domain/users.clj rename to services/data-info/src/data_info/routes/domain/permissions.clj index 15f54e592..e55926981 100644 --- a/services/data-info/src/data_info/routes/domain/users.clj +++ b/services/data-info/src/data_info/routes/domain/permissions.clj @@ -1,9 +1,8 @@ -(ns data-info.routes.domain.users - (:use [common-swagger-api.schema :only [describe]]) +(ns data-info.routes.domain.permissions + (:use [common-swagger-api.schema :only [describe]] + [data-info.routes.domain.common :only [PermissionEnum]]) (:require [schema.core :as s])) -(def PermissionEnum (s/enum :read :write :own)) - (s/defschema UserPermission {:user (describe String "The user's short username") :permission (describe PermissionEnum "The user's level of permission")}) @@ -14,3 +13,6 @@ (s/defschema PermissionsResponse {:paths (describe [PermissionsEntry] "An array of objects describing files and their permissions")}) + +(s/defschema DataItemPermissionsResponse + {:permissions (describe [UserPermission] "An array of objects describing user permissions.")}) diff --git a/services/data-info/src/data_info/routes/domain/stats.clj b/services/data-info/src/data_info/routes/domain/stats.clj index 4329fcff1..187a8557b 100644 --- a/services/data-info/src/data_info/routes/domain/stats.clj +++ b/services/data-info/src/data_info/routes/domain/stats.clj @@ -5,7 +5,6 @@ (:import [java.util UUID])) (def DataTypeEnum (s/enum :file :dir)) -(def PermissionEnum (s/enum :read :write :own)) (def DataItemIdParam (describe UUID "The UUID of this data item")) (def DataItemPathParam (describe NonBlankString "The IRODS paths to this data item")) diff --git a/services/data-info/src/data_info/routes/users.clj b/services/data-info/src/data_info/routes/users.clj index 848348948..d3aa032d2 100644 --- a/services/data-info/src/data_info/routes/users.clj +++ b/services/data-info/src/data_info/routes/users.clj @@ -1,7 +1,7 @@ (ns data-info.routes.users (:use [common-swagger-api.schema] [data-info.routes.domain.common] - [data-info.routes.domain.users]) + [data-info.routes.domain.permissions]) (:require [data-info.services.users :as users] 
[data-info.util.service :as svc])) diff --git a/services/data-info/src/data_info/services/permissions.clj b/services/data-info/src/data_info/services/permissions.clj new file mode 100644 index 000000000..87bee978b --- /dev/null +++ b/services/data-info/src/data_info/services/permissions.clj @@ -0,0 +1,31 @@ +(ns data-info.services.permissions + (:use [clj-jargon.init :only [with-jargon]]) + (:require [clojure.tools.logging :as log] + [dire.core :refer [with-pre-hook! with-post-hook!]] + [clj-jargon.permissions :as perm] + [clojure-commons.file-utils :as ft] + [data-info.services.uuids :as uuids] + [data-info.util.config :as cfg] + [data-info.util.logging :as dul] + [data-info.util.validators :as validators])) + +(defn filtered-user-perms + [cm user abspath] + (let [filtered-users (set (conj (cfg/perms-filter) user (cfg/irods-user)))] + (filter + #(not (contains? filtered-users (:user %1))) + (perm/list-user-perm cm abspath)))) + +(defn list-permissions + [{:keys [user]} data-id] + (with-jargon (cfg/jargon-cfg) [cm] + (let [path (ft/rm-last-slash (:path (uuids/path-for-uuid user data-id)))] + (validators/user-exists cm user) + (validators/path-readable cm user path) + {:permissions (filtered-user-perms cm user path)}))) + +(with-pre-hook! #'list-permissions + (fn [params data-id] + (dul/log-call "list-permissions" params data-id))) + +(with-post-hook! #'list-permissions (dul/log-func "list-permissions")) diff --git a/services/data-info/src/data_info/services/users.clj b/services/data-info/src/data_info/services/users.clj index fdb31161d..5db03a822 100644 --- a/services/data-info/src/data_info/services/users.clj +++ b/services/data-info/src/data_info/services/users.clj @@ -2,23 +2,15 @@ (:use [clj-jargon.init :only [with-jargon]]) (:require [clojure.tools.logging :as log] [dire.core :refer [with-pre-hook! with-post-hook!]] - [clj-jargon.permissions :as perm] + [data-info.services.permissions :as perms] [data-info.util.config :as cfg] [data-info.util.logging :as dul] [data-info.util.validators :as validators])) -(defn- filtered-user-perms - [cm user abspath] - (let [filtered-users (set (conj (cfg/perms-filter) user (cfg/irods-user)))] - (filter - #(not (contains? 
filtered-users (:user %1))) - (perm/list-user-perm cm abspath)))) - - (defn- list-perm [cm user abspath] {:path abspath - :user-permissions (filtered-user-perms cm user abspath)}) + :user-permissions (perms/filtered-user-perms cm user abspath)}) (defn- list-perms [user abspaths] From 2d78814d9d053258446fcfe10f9c9b204649565b Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Thu, 10 Mar 2016 15:01:39 -0700 Subject: [PATCH 130/183] CORE-7558: slightly altered the response body of the GET /data/:data-id/permissions endpoint and moved the definition of the endpoint to a new file --- services/data-info/src/data_info/routes.clj | 5 +++-- .../data-info/src/data_info/routes/data.clj | 13 +------------ .../data_info/routes/domain/permissions.clj | 2 +- .../routes/{users.clj => permissions.clj} | 19 +++++++++++++++++-- .../src/data_info/services/permissions.clj | 2 +- 5 files changed, 23 insertions(+), 18 deletions(-) rename services/data-info/src/data_info/routes/{users.clj => permissions.clj} (64%) diff --git a/services/data-info/src/data_info/routes.clj b/services/data-info/src/data_info/routes.clj index d8037d238..bd4ef3469 100644 --- a/services/data-info/src/data_info/routes.clj +++ b/services/data-info/src/data_info/routes.clj @@ -11,7 +11,7 @@ [data-info.routes.data :as data-routes] [data-info.routes.exists :as exists-routes] [data-info.routes.filetypes :as filetypes-routes] - [data-info.routes.users :as users-routes] + [data-info.routes.permissions :as permission-routes] [data-info.routes.navigation :as navigation-routes] [data-info.routes.rename :as rename-routes] [data-info.routes.sharing :as sharing-routes] @@ -51,7 +51,8 @@ avus-routes/avus-routes exists-routes/existence-marker filetypes-routes/filetypes-operations - users-routes/permissions-gatherer + permission-routes/permissions-gatherer + permission-routes/data-item-permissions navigation-routes/navigation stat-routes/stat-gatherer sharing-routes/sharing-routes diff --git a/services/data-info/src/data_info/routes/data.clj b/services/data-info/src/data_info/routes/data.clj index 685dd57bf..e5890674d 100644 --- a/services/data-info/src/data_info/routes/data.clj +++ b/services/data-info/src/data_info/routes/data.clj @@ -2,7 +2,6 @@ (:use [common-swagger-api.schema] [data-info.routes.domain.common] [data-info.routes.domain.data] - [data-info.routes.domain.permissions] [data-info.routes.domain.stats]) (:require [data-info.services.create :as create] [data-info.services.metadata :as meta] @@ -10,7 +9,6 @@ [data-info.services.write :as write] [data-info.services.page-file :as page-file] [data-info.services.page-tabular :as page-tabular] - [data-info.services.permissions :as perms] [data-info.util.config :as cfg] [clojure-commons.error-codes :as ce] [data-info.util.service :as svc] @@ -132,13 +130,4 @@ with characters in a runtime-configurable parameter. Currently, this parameter l "ERR_INVALID_JSON, ERR_EXISTS, ERR_DOES_NOT_EXIST, ERR_NOT_READABLE," "ERR_NOT_WRITEABLE, ERR_NOT_A_USER, ERR_BAD_PATH_LENGTH, ERR_BAD_DIRNAME_LENGTH," "ERR_BAD_BASENAME_LENGTH, ERR_TOO_MANY_RESULTS")) - (svc/trap uri meta/do-metadata-save data-id params body)) - - (GET* "/permissions" [:as {uri :uri}] - :query [params StandardUserQueryParams] - :return DataItemPermissionsResponse - :summary "Lists Data Item Permissions" - :description (str - "Lists permissions for a data item." 
- (get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER")) - (svc/trap uri perms/list-permissions params data-id))))) + (svc/trap uri meta/do-metadata-save data-id params body))))) diff --git a/services/data-info/src/data_info/routes/domain/permissions.clj b/services/data-info/src/data_info/routes/domain/permissions.clj index e55926981..9e671254c 100644 --- a/services/data-info/src/data_info/routes/domain/permissions.clj +++ b/services/data-info/src/data_info/routes/domain/permissions.clj @@ -15,4 +15,4 @@ {:paths (describe [PermissionsEntry] "An array of objects describing files and their permissions")}) (s/defschema DataItemPermissionsResponse - {:permissions (describe [UserPermission] "An array of objects describing user permissions.")}) + (dissoc PermissionsEntry :path)) diff --git a/services/data-info/src/data_info/routes/users.clj b/services/data-info/src/data_info/routes/permissions.clj similarity index 64% rename from services/data-info/src/data_info/routes/users.clj rename to services/data-info/src/data_info/routes/permissions.clj index d3aa032d2..3ab124e07 100644 --- a/services/data-info/src/data_info/routes/users.clj +++ b/services/data-info/src/data_info/routes/permissions.clj @@ -1,11 +1,11 @@ -(ns data-info.routes.users +(ns data-info.routes.permissions (:use [common-swagger-api.schema] [data-info.routes.domain.common] [data-info.routes.domain.permissions]) (:require [data-info.services.users :as users] + [data-info.services.permissions :as perms] [data-info.util.service :as svc])) - (defroutes* permissions-gatherer (context* "/permissions-gatherer" [] @@ -23,3 +23,18 @@ (get-error-code-block "ERR_NOT_A_USER, ERR_DOES_NOT_EXIST, ERR_NOT_OWNER, ERR_NOT_READABLE")) (svc/trap uri users/do-user-permissions params body)))) + +(defroutes* data-item-permissions + + (context* "/data/:data-id" [] + :path-params [data-id :- DataIdPathParam] + :tags ["data-by-id"] + + (GET* "/permissions" [:as {uri :uri}] + :query [params StandardUserQueryParams] + :return DataItemPermissionsResponse + :summary "Lists Data Item Permissions" + :description (str + "Lists permissions for a data item." + (get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER")) + (svc/trap uri perms/list-permissions params data-id)))) diff --git a/services/data-info/src/data_info/services/permissions.clj b/services/data-info/src/data_info/services/permissions.clj index 87bee978b..e4051ee11 100644 --- a/services/data-info/src/data_info/services/permissions.clj +++ b/services/data-info/src/data_info/services/permissions.clj @@ -22,7 +22,7 @@ (let [path (ft/rm-last-slash (:path (uuids/path-for-uuid user data-id)))] (validators/user-exists cm user) (validators/path-readable cm user path) - {:permissions (filtered-user-perms cm user path)}))) + {:user-permissions (filtered-user-perms cm user path)}))) (with-pre-hook! 
#'list-permissions
  (fn [params data-id]

From c27b190f1312d3f860b019e44a7e34ec21ebbcc5 Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Thu, 10 Mar 2016 15:34:58 -0700
Subject: [PATCH 131/183] CORE-7558: consolidated all of the routes defined in
 data-info.routes.permissions into a single defroutes*

---
 services/data-info/src/data_info/routes.clj             | 3 +--
 services/data-info/src/data_info/routes/permissions.clj | 6 ++----
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/services/data-info/src/data_info/routes.clj b/services/data-info/src/data_info/routes.clj
index bd4ef3469..5c5a63761 100644
--- a/services/data-info/src/data_info/routes.clj
+++ b/services/data-info/src/data_info/routes.clj
@@ -51,8 +51,7 @@
     avus-routes/avus-routes
     exists-routes/existence-marker
     filetypes-routes/filetypes-operations
-    permission-routes/permissions-gatherer
-    permission-routes/data-item-permissions
+    permission-routes/permissions-routes
     navigation-routes/navigation
     stat-routes/stat-gatherer
     sharing-routes/sharing-routes
diff --git a/services/data-info/src/data_info/routes/permissions.clj b/services/data-info/src/data_info/routes/permissions.clj
index 3ab124e07..56a3754c0 100644
--- a/services/data-info/src/data_info/routes/permissions.clj
+++ b/services/data-info/src/data_info/routes/permissions.clj
@@ -6,7 +6,7 @@
             [data-info.services.permissions :as perms]
             [data-info.util.service :as svc]))

-(defroutes* permissions-gatherer
+(defroutes* permissions-routes

   (context* "/permissions-gatherer" []
     :tags ["bulk"]
@@ -22,9 +22,7 @@
 " The owner permissions are found in the normal stat return, e.g. from /stat-gatherer."
 (get-error-code-block "ERR_NOT_A_USER, ERR_DOES_NOT_EXIST, ERR_NOT_OWNER, ERR_NOT_READABLE"))
-      (svc/trap uri users/do-user-permissions params body))))
-
-(defroutes* data-item-permissions
+      (svc/trap uri users/do-user-permissions params body)))

   (context* "/data/:data-id" []
     :path-params [data-id :- DataIdPathParam]
     :tags ["data-by-id"]

From 2100837def6312549046b0c46124dc57c49bade6 Mon Sep 17 00:00:00 2001
From: Ian McEwen
Date: Thu, 10 Mar 2016 16:35:05 -0700
Subject: [PATCH 132/183] Update the templeton Dockerfile to match the
 -linux-amd64 suffix the binary is built with

---
 services/templeton/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/services/templeton/Dockerfile b/services/templeton/Dockerfile
index 0c9b0ee93..610a32d04 100644
--- a/services/templeton/Dockerfile
+++ b/services/templeton/Dockerfile
@@ -1,6 +1,6 @@
 FROM jeanblanchard/alpine-glibc

-ADD bin/templeton /bin/
+ADD bin/templeton-linux-amd64 /bin/templeton

 ARG git_commit=unknown
 ARG buildenv_git_commit=unknown

From d1006929880008619712d30265ea9aff200ab438 Mon Sep 17 00:00:00 2001
From: Ian McEwen
Date: Thu, 10 Mar 2016 16:38:29 -0700
Subject: [PATCH 133/183] Add templeton to docker images and manifest images.
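As far as I can tell, docker/images.txt drives which images get built while
docker/manifest-images.txt drives which images end up in the deployment
manifest, so templeton has to be listed in both to be built and deployed.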
--- docker/images.txt | 1 + docker/manifest-images.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/docker/images.txt b/docker/images.txt index 6ce8184e5..dc5e8ccac 100644 --- a/docker/images.txt +++ b/docker/images.txt @@ -21,6 +21,7 @@ notificationagent porklock saved-searches sharkbait +templeton tree-urls user-preferences user-sessions diff --git a/docker/manifest-images.txt b/docker/manifest-images.txt index 964665818..36a6946f2 100644 --- a/docker/manifest-images.txt +++ b/docker/manifest-images.txt @@ -16,6 +16,7 @@ monkey notificationagent porklock saved-searches +templeton tree-urls user-preferences user-sessions From 79c4b83c1d263fdaa7e4aff5172373f9961cda09 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Thu, 10 Mar 2016 17:47:27 -0700 Subject: [PATCH 134/183] CORE-7558: added endpoints to grant and revoke access to a data item --- .../src/data_info/routes/permissions.clj | 39 +++++++++--- .../src/data_info/services/permissions.clj | 39 +++++++++++- .../src/data_info/services/sharing.clj | 60 +++++++++++++++++-- 3 files changed, 124 insertions(+), 14 deletions(-) diff --git a/services/data-info/src/data_info/routes/permissions.clj b/services/data-info/src/data_info/routes/permissions.clj index 56a3754c0..accd8fbf7 100644 --- a/services/data-info/src/data_info/routes/permissions.clj +++ b/services/data-info/src/data_info/routes/permissions.clj @@ -28,11 +28,34 @@ :path-params [data-id :- DataIdPathParam] :tags ["data-by-id"] - (GET* "/permissions" [:as {uri :uri}] - :query [params StandardUserQueryParams] - :return DataItemPermissionsResponse - :summary "Lists Data Item Permissions" - :description (str - "Lists permissions for a data item." - (get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER")) - (svc/trap uri perms/list-permissions params data-id)))) + (context* "/permissions" [] + + (GET* "/" [:as {uri :uri}] + :query [params StandardUserQueryParams] + :return DataItemPermissionsResponse + :summary "List Data Item Permissions" + :description (str +"Lists permissions for a data item." +(get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER")) + (svc/trap uri perms/list-permissions params data-id)) + + (PUT* "/:share-with/:permission" [:as {uri :uri}] + :path-params [share-with :- (describe NonBlankString "The user to grant permissions to.") + permission :- (describe PermissionEnum "The permission level to grant.")] + :query [params StandardUserQueryParams] + :return DataItemPermissionsResponse + :summary "Grant Data Item Permissions" + :description (str +"Grants access to a data item." +(get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER", "ERR_NOT_OWNER")) + (svc/trap uri perms/add-permission params data-id share-with permission)) + + (DELETE* "/:unshare-with" [:as {uri :uri}] + :path-params [unshare-with :- (describe NonBlankString "The user whose permissions will be revoked.")] + :query [params StandardUserQueryParams] + :return DataItemPermissionsResponse + :summary "Revoke Data Item Permissions" + :description (str +"Revokes access to a data item." 
+(get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER", "ERR_NOT_OWNER")) + (svc/trap uri perms/remove-permission params data-id unshare-with))))) diff --git a/services/data-info/src/data_info/services/permissions.clj b/services/data-info/src/data_info/services/permissions.clj index e4051ee11..649b61baf 100644 --- a/services/data-info/src/data_info/services/permissions.clj +++ b/services/data-info/src/data_info/services/permissions.clj @@ -4,6 +4,7 @@ [dire.core :refer [with-pre-hook! with-post-hook!]] [clj-jargon.permissions :as perm] [clojure-commons.file-utils :as ft] + [data-info.services.sharing :as sharing] [data-info.services.uuids :as uuids] [data-info.util.config :as cfg] [data-info.util.logging :as dul] @@ -16,16 +17,52 @@ #(not (contains? filtered-users (:user %1))) (perm/list-user-perm cm abspath)))) +(defn list-permissions* + [cm user path] + {:user-permissions (filtered-user-perms cm user path)}) + (defn list-permissions [{:keys [user]} data-id] (with-jargon (cfg/jargon-cfg) [cm] (let [path (ft/rm-last-slash (:path (uuids/path-for-uuid user data-id)))] (validators/user-exists cm user) (validators/path-readable cm user path) - {:user-permissions (filtered-user-perms cm user path)}))) + (list-permissions* cm user path)))) (with-pre-hook! #'list-permissions (fn [params data-id] (dul/log-call "list-permissions" params data-id))) (with-post-hook! #'list-permissions (dul/log-func "list-permissions")) + +(defn add-permission + [{:keys [user]} data-id share-with permission] + (with-jargon (cfg/jargon-cfg) [cm] + (let [path (ft/rm-last-slash (:path (uuids/path-for-uuid user data-id)))] + (validators/user-exists cm user) + (validators/user-owns-path cm user path) + (validators/user-exists cm share-with) + (sharing/share-path cm user share-with path permission) + (list-permissions* cm user path)))) + +(with-pre-hook! #'add-permission + (fn [params data-id share-with permission] + (dul/log-call "add-permission" params data-id share-with permission))) + +(with-post-hook! #'add-permission (dul/log-func "add-permission")) + +(defn remove-permission + [{:keys [user]} data-id unshare-with] + (with-jargon (cfg/jargon-cfg) [cm] + (let [path (ft/rm-last-slash (:path (uuids/path-for-uuid user data-id)))] + (validators/user-exists cm user) + (validators/user-owns-path cm user path) + (validators/user-exists cm unshare-with) + (sharing/unshare-path cm user unshare-with path) + (list-permissions* cm user path)))) + +(with-pre-hook! #'remove-permission + (fn [params data-id unshare-with] + (dul/log-call "remove-permission" params data-id unshare-with))) + +(with-post-hook! #'remove-permission (dul/log-func "remove-permission")) diff --git a/services/data-info/src/data_info/services/sharing.clj b/services/data-info/src/data_info/services/sharing.clj index 893e38723..d10673f69 100644 --- a/services/data-info/src/data_info/services/sharing.clj +++ b/services/data-info/src/data_info/services/sharing.clj @@ -33,7 +33,7 @@ [share-path] (string/join "/" (take 4 (string/split share-path #"\/")))) -(defn- share-path +(defn- share-path* "Shares a path with a user. This consists of the following steps: 1. The parent directories up to the sharer's home directory need to be marked as readable @@ -64,14 +64,18 @@ {:user share-with :path fpath})) +(defn share-path + [cm user share-with fpath perm] + (cond (= user share-with) (skip-share share-with fpath :share-with-self) + (paths/in-trash? user fpath) (skip-share share-with fpath :share-from-trash) + (shared? 
cm share-with fpath perm) (skip-share share-with fpath :already-shared) + :else (share-path* cm user share-with perm fpath))) + (defn- share-paths [cm user share-withs fpaths perm] (for [share-with share-withs fpath fpaths] - (cond (= user share-with) (skip-share share-with fpath :share-with-self) - (paths/in-trash? user fpath) (skip-share share-with fpath :share-from-trash) - (shared? cm share-with fpath perm) (skip-share share-with fpath :already-shared) - :else (share-path cm user share-with perm fpath)))) + (share-path cm user share-with fpath perm))) (defn- share [cm user share-withs fpaths perm] @@ -89,6 +93,52 @@ :skipped (map #(dissoc % :skipped) (:skipped share-recs)) :permission perm})) +(defn- remove-inherit-bit? + [cm user fpath] + (empty? (remove (comp (conj (set (cfg/irods-admins)) user) :user) + (list-user-perms cm fpath)))) + +(defn- unshare-dir + "Removes the inherit bit from a directory if the directory is no longer shared with any accounts + other than iRODS administrative accounts." + [cm user unshare-with fpath] + (when (remove-inherit-bit? cm user fpath) + (log/warn "Removing inherit bit on" fpath) + (remove-inherits cm fpath))) + +(defn- unshare-path* + "Removes permissions for a user to access a path. This consists of several steps: + + 1. Remove the access permissions for the user. This is done recursively in case the path + being unshared is a directory. + + 2. If the item being unshared is a directory, perform any directory-specific unsharing + steps that are required. + + 3. Remove the user's read permissions for parent directories in which the user no longer has + access to any other files or subdirectories." + [cm user unshare-with fpath] + (let [base-dirs #{(ft/rm-last-slash (paths/user-home-dir user)) (trash-base-dir (:zone cm) user)}] + (log/warn "Removing permissions on" fpath "from" unshare-with "by" user) + (remove-permissions cm unshare-with fpath) + + (when (is-dir? cm fpath) + (log/warn "Unsharing directory" fpath "from" unshare-with "by" user) + (unshare-dir cm user unshare-with fpath)) + + (log/warn "Removing read perms on parents of" fpath "from" unshare-with "by" user) + (process-parent-dirs + (partial set-readable cm unshare-with false) + #(and (not (base-dirs %)) (not (contains-accessible-obj? cm unshare-with %))) + fpath) + {:user unshare-with :path fpath})) + +(defn unshare-path + [cm user unshare-with fpath] + (cond (= user unshare-with) (skip-share unshare-with fpath :unshare-with-self) + (shared? cm unshare-with fpath) (unshare-path* cm user unshare-with fpath) + :else (skip-share unshare-with fpath :not-shared))) + (defn- anon-file-url [p] (let [aurl (url/url (cfg/anon-files-base))] From 4e0e122d36c0257c15de330175816f5ec7530752 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 11 Mar 2016 11:08:39 -0700 Subject: [PATCH 135/183] CORE-7558: fixed the error code blocks for the data object permission endpoints in data-info --- services/data-info/src/data_info/routes/permissions.clj | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/data-info/src/data_info/routes/permissions.clj b/services/data-info/src/data_info/routes/permissions.clj index accd8fbf7..7769d406c 100644 --- a/services/data-info/src/data_info/routes/permissions.clj +++ b/services/data-info/src/data_info/routes/permissions.clj @@ -47,7 +47,7 @@ :summary "Grant Data Item Permissions" :description (str "Grants access to a data item." 
-(get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER", "ERR_NOT_OWNER")) +(get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_OWNER, ERR_NOT_A_USER")) (svc/trap uri perms/add-permission params data-id share-with permission)) (DELETE* "/:unshare-with" [:as {uri :uri}] @@ -57,5 +57,5 @@ :summary "Revoke Data Item Permissions" :description (str "Revokes access to a data item." -(get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_READABLE, ERR_NOT_A_USER", "ERR_NOT_OWNER")) +(get-error-code-block "ERR_DOES_NOT_EXIST, ERR_NOT_OWNER, ERR_NOT_A_USER")) (svc/trap uri perms/remove-permission params data-id unshare-with))))) From a87e151dfd0d628283e3300507cf52a421f88433 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Fri, 11 Mar 2016 12:40:40 -0700 Subject: [PATCH 136/183] CORE-7266 refactor menu disable. --- .../de/apps/client/views/toolBar/AppsViewToolbarImpl.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImpl.java index 2b7b0be91..fcb895aad 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImpl.java @@ -65,6 +65,9 @@ public class AppsViewToolbarImpl extends Composite implements AppsToolbarView { interface AppsViewToolbarUiBinder extends UiBinder { } + + @UiField + TextButton share_menu; @UiField Menu sharingMenu; @UiField @@ -278,7 +281,7 @@ public void onAppSelectionChanged(final AppSelectionChangedEvent event) { shareWithCollaboratorsMiEnabled = containsSharableApps(currentSelection); } - sharingMenu.setEnabled(submitAppEnabled || shareWithCollaboratorsMiEnabled); + share_menu.setEnabled(submitAppEnabled || shareWithCollaboratorsMiEnabled); deleteApp.setEnabled(deleteAppEnabled); editApp.setEnabled(editAppEnabled); sharePublic.setEnabled(submitAppEnabled); From 0b6e120489f58ad7ad7676b723d388b788ada0ad Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Fri, 11 Mar 2016 12:41:15 -0700 Subject: [PATCH 137/183] CORE-7152 add service facade. 
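shareAnalyses and unshareAnalyses encode the AnalysisSharingRequest and
AnalysisUnsharingRequest autobeans and POST them to the analyses/sharing and
analyses/unsharing endpoints. getPermissions builds its payload by hand and
POSTs it to analyses/permission-lister; as written, it keys the selected
analysis IDs under "apps", producing a body shaped like this (the UUID is
made up for illustration):

    {"apps": ["de305d54-75b4-431b-adb2-eb6b9e546014"]}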
--- .../sharing/AnalysisSharingPresenter.java | 10 ++++++ .../services/AnalysisServiceFacade.java | 10 ++++-- .../impl/AnalysisServiceFacadeImpl.java | 34 +++++++++++++++++-- 3 files changed, 50 insertions(+), 4 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java index 6c4b6ac25..42b7c7f60 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java @@ -20,6 +20,7 @@ import com.sencha.gxt.core.shared.FastMap; +import java.util.ArrayList; import java.util.List; public class AnalysisSharingPresenter implements SharingPresenter { @@ -59,6 +60,15 @@ private FastMap getSelectedResourcesAsMap(List selecte return resourcesMap; } + private List buildAppsList(List shareList) { + List anaIds = new ArrayList<>(); + for (Sharing s : shareList) { + anaIds.add(s.getId()); + } + + return anaIds; + } + @Override public void go(HasOneWidget container) { container.setWidget(view.asWidget()); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java index f8d550f16..16bc6f6db 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java @@ -3,6 +3,11 @@ import org.iplantc.de.client.models.analysis.Analysis; import org.iplantc.de.client.models.analysis.AnalysisParameter; import org.iplantc.de.client.models.analysis.AnalysisStepsInfo; +import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingRequest; +import org.iplantc.de.client.models.analysis.sharing.AnalysisUnsharingRequest; +import org.iplantc.de.client.models.apps.App; +import org.iplantc.de.client.models.apps.sharing.AppSharingRequestList; +import org.iplantc.de.client.models.apps.sharing.AppUnSharingRequestList; import com.google.gwt.user.client.rpc.AsyncCallback; @@ -58,9 +63,10 @@ public interface AnalysisServiceFacade { */ void getAnalysisSteps(Analysis analysis, AsyncCallback callback); - void shareAnalyses(List analysisList, AsyncCallback callback); + void shareAnalyses(AnalysisSharingRequest request, AsyncCallback callback); - void unshareAnalyses(List analysisList, AsyncCallback callback); + void unshareAnalyses(AnalysisUnsharingRequest request, AsyncCallback callback); + void getPermissions(List analyses, AsyncCallback callback); } \ No newline at end of file diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java index f30636798..342dc92ca 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java @@ -11,6 +11,8 @@ import org.iplantc.de.client.models.analysis.AnalysisParametersList; import org.iplantc.de.client.models.analysis.AnalysisStepsInfo; import org.iplantc.de.client.models.analysis.SimpleValue; +import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingRequest; +import org.iplantc.de.client.models.analysis.sharing.AnalysisUnsharingRequest; import 
org.iplantc.de.client.models.apps.integration.ArgumentType;
 import org.iplantc.de.client.models.apps.integration.SelectionItem;
 import org.iplantc.de.client.services.AnalysisServiceFacade;
@@ -18,12 +20,14 @@
 import org.iplantc.de.client.services.converters.StringToVoidCallbackConverter;
 import org.iplantc.de.client.util.AppTemplateUtils;
 import org.iplantc.de.client.util.DiskResourceUtil;
+import org.iplantc.de.shared.services.BaseServiceCallWrapper;
 import org.iplantc.de.shared.services.DiscEnvApiService;
 import org.iplantc.de.shared.services.ServiceCallWrapper;

 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.gwt.core.client.GWT;
 import com.google.gwt.http.client.URL;
 import com.google.gwt.json.client.JSONArray;
 import com.google.gwt.json.client.JSONObject;
@@ -32,6 +36,7 @@
 import com.google.inject.Inject;
 import com.google.web.bindery.autobean.shared.AutoBean;
 import com.google.web.bindery.autobean.shared.AutoBeanCodex;
+import com.google.web.bindery.autobean.shared.AutoBeanUtils;
 import com.google.web.bindery.autobean.shared.Splittable;
 import com.google.web.bindery.autobean.shared.impl.StringQuoter;

@@ -291,13 +296,38 @@ public void getAnalysisSteps(Analysis analysis, AsyncCallback
     }

     @Override
-    public void shareAnalyses(List analysisList, AsyncCallback callback) {
+    public void shareAnalyses(AnalysisSharingRequest request, AsyncCallback callback) {
+        final String payload = AutoBeanCodex.encode(AutoBeanUtils.getAutoBean(request)).getPayload();
+        GWT.log("analysis sharing request:" + payload);
+        String address = ANALYSES + "/" + "sharing";
+        ServiceCallWrapper wrapper = new ServiceCallWrapper(POST, address, payload);
+        deServiceFacade.getServiceData(wrapper, callback);
+    }

+    @Override
+    public void unshareAnalyses(AnalysisUnsharingRequest request, AsyncCallback callback) {
+        final String payload = AutoBeanCodex.encode(AutoBeanUtils.getAutoBean(request)).getPayload();
+        GWT.log("analysis un-sharing request:" + payload);
+        String address = ANALYSES + "/" + "unsharing";
+        ServiceCallWrapper wrapper = new ServiceCallWrapper(POST, address, payload);
+        deServiceFacade.getServiceData(wrapper, callback);
     }

     @Override
-    public void unshareAnalyses(List analysisList, AsyncCallback callback) {
+    public void getPermissions(List analyses, AsyncCallback callback) {
+        Splittable anaObj = StringQuoter.createSplittable();
+        Splittable idArr = StringQuoter.createIndexed();
+        for(Analysis a : analyses) {
+            Splittable item = StringQuoter.create(a.getId());
+            item.assign(idArr, idArr.size());
+        }
+
+        idArr.assign(anaObj, "apps");
+        String address = ANALYSES + "/" + "permission-lister";
+        ServiceCallWrapper wrapper = new ServiceCallWrapper(BaseServiceCallWrapper.Type.POST, address, anaObj.getPayload());
+        deServiceFacade.getServiceData(wrapper, callback);
     }
+
 }

From 4cf34eda60316dac9113833dd0c9e0803055a717 Mon Sep 17 00:00:00 2001
From: Dennis Roberts
Date: Fri, 11 Mar 2016 14:41:30 -0700
Subject: [PATCH 138/183] CORE-7558: modified the method used for base
 directory selection in data-info.services.sharing/unshare-path*

---
 services/data-info/src/data_info/services/sharing.clj | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/services/data-info/src/data_info/services/sharing.clj b/services/data-info/src/data_info/services/sharing.clj
index d10673f69..108cb036f 100644
--- a/services/data-info/src/data_info/services/sharing.clj
+++ b/services/data-info/src/data_info/services/sharing.clj
@@ -118,7
+118,9 @@ 3. Remove the user's read permissions for parent directories in which the user no longer has access to any other files or subdirectories." [cm user unshare-with fpath] - (let [base-dirs #{(ft/rm-last-slash (paths/user-home-dir user)) (trash-base-dir (:zone cm) user)}] + (let [trash-base (trash-base-dir (:zone cm) user) + path-base (share-path-home fpath) + base-dirs #{path-base trash-base}] (log/warn "Removing permissions on" fpath "from" unshare-with "by" user) (remove-permissions cm unshare-with fpath) @@ -128,9 +130,9 @@ (log/warn "Removing read perms on parents of" fpath "from" unshare-with "by" user) (process-parent-dirs - (partial set-readable cm unshare-with false) - #(and (not (base-dirs %)) (not (contains-accessible-obj? cm unshare-with %))) - fpath) + (partial set-readable cm unshare-with false) + #(not (or (base-dirs %) (contains-accessible-obj? cm unshare-with %))) + fpath) {:user unshare-with :path fpath})) (defn unshare-path From ad68ecfae96ca606bb7e4ef5244520184bda975c Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Fri, 11 Mar 2016 16:21:45 -0700 Subject: [PATCH 139/183] CORE-7550 Return null folders in Perm-ID Request endpoints. Updated terrain Perm-ID Request endpoints to return null folder fields instead of error responses when the folder does not exist. Updated the `POST /admin/permanent-id-requests/{request-id}/ezid` terrain endpoint to return a not-found error that includes the original path when the folder no longer exists. Updated metadata Perm-ID Request listing endpoints to include the original_path field. --- .../persistence/permanent_id_requests.clj | 5 ++-- .../services/permanent_id_requests.clj | 25 +++++++++++++------ 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/services/metadata/src/metadata/persistence/permanent_id_requests.clj b/services/metadata/src/metadata/persistence/permanent_id_requests.clj index 748864cfb..ff0573426 100644 --- a/services/metadata/src/metadata/persistence/permanent_id_requests.clj +++ b/services/metadata/src/metadata/persistence/permanent_id_requests.clj @@ -26,6 +26,7 @@ :target_id :target_type :requested_by + :original_path [:status_codes.name :status] [:statuses.date_assigned :status_date] [:statuses.updated_by]) @@ -44,12 +45,12 @@ sort-field :sort-field sort-dir :sort-dir}] (subselect [(list-permanent-id-requests-subselect user) :reqs] - (fields :id :type :target_id :target_type :requested_by + (fields :id :type :target_id :target_type :requested_by :original_path [(sqlfn :first :status_date) :date_submitted] [(sqlfn :last :status) :status] [(sqlfn :last :status_date) :date_updated] [(sqlfn :last :updated_by) :updated_by]) - (group :id :type :target_id :target_type :requested_by) + (group :id :type :target_id :target_type :requested_by :original_path) (order (or sort-field :date_submitted) (or sort-dir :ASC)) (limit row-limit) (offset row-offset))) diff --git a/services/terrain/src/terrain/services/permanent_id_requests.clj b/services/terrain/src/terrain/services/permanent_id_requests.clj index 1a5db21aa..cd5e2409c 100644 --- a/services/terrain/src/terrain/services/permanent_id_requests.clj +++ b/services/terrain/src/terrain/services/permanent_id_requests.clj @@ -53,7 +53,11 @@ ezid-metadata) (defn- validate-request-for-completion - [{:keys [permanent_id]}] + [{:keys [folder original_path permanent_id]}] + (when (empty? folder) + (throw+ {:type :clojure-commons.exception/not-found + :error "Folder not found." + :path original_path})) (when-not (empty? 
permanent_id) (throw+ {:type :clojure-commons.exception/bad-request :error "This Request appears to be completed, since it already has a Permanent ID." @@ -194,11 +198,10 @@ "Could not send permanent_id_request (" request-id ") notification to" user ":" subject)))) (defn- send-update-notification - [{{:keys [username email]} :requested_by - :keys [id type folder history] - :or {folder {:path "unknown"}}}] + [{:keys [id type folder history] {:keys [username email]} :requested_by}] (let [{:keys [status comments]} (last history) - subject (str type " Request for " (ft/basename (:path folder)) " Status Changed to " status)] + folder-name (if folder (ft/basename (:path folder)) "unknown") + subject (str type " Request for " folder-name " Status Changed to " status)] (send-notification username email subject comments id))) (defn- request-type->shoulder @@ -263,11 +266,19 @@ :unit ""}]) (partial remove #(contains? remove-attrs (:attr %))))))) +(defn- format-folder-details + [user folder-id] + (try+ + (data-info/stat-by-uuid user folder-id) + (catch Object e + (log/warn e "Could not lookup folder details.") + nil))) + (defn- format-perm-id-req-response [user {:keys [target_id] :as response}] (-> response - (dissoc :target_id :target_type :original_path) - (assoc :folder (data-info/stat-by-uuid user (uuidify target_id))))) + (dissoc :target_id :target_type) + (assoc :folder (format-folder-details user (uuidify target_id))))) (defn- format-requested-by [user {:keys [requested_by target_id] :as permanent-id-request}] From a304e2ed5c3d056fdc7bac1e106bc8ba22ad60bf Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Fri, 11 Mar 2016 16:34:11 -0700 Subject: [PATCH 140/183] CORE-7550 Update Belphegor's Perm-ID Request display for null folders. Updated Belphegor to display a "Not Found" message for null folder fields in the Perm-ID Request listing grid and when the "View Metadata" action is selected. The Perm-ID Request "View Metadata" error message also includes the request's original path. 
--- .../PermanentIdRequestPresenter.java | 9 ++++- .../views/FolderPathProvider.java | 38 +++++++++++++++++++ .../views/PermanentIdRequestProperties.java | 5 --- .../views/PermanentIdRequestView.java | 4 ++ .../views/PermanentIdRequestViewImpl.java | 2 +- .../identifiers/PermanentIdRequest.java | 3 ++ .../PermIdRequestDisplayStrings.java | 2 + .../PermIdRequestDisplayStrings.properties | 1 + ...ntIdRequestPresenterDefaultAppearance.java | 13 ++++++- ...rmanentIdRequestViewDefaultAppearance.java | 4 ++ 10 files changed, 72 insertions(+), 9 deletions(-) create mode 100644 ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/FolderPathProvider.java diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java index eb45f00b4..4e1685b13 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/presenter/PermanentIdRequestPresenter.java @@ -5,6 +5,7 @@ import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.PermanentIdRequestPresenterAppearance; import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.Presenter; import org.iplantc.de.admin.desktop.client.permIdRequest.views.UpdatePermanentIdRequestDialog; +import org.iplantc.de.client.models.diskResources.Folder; import org.iplantc.de.client.models.identifiers.PermanentIdRequest; import org.iplantc.de.client.models.identifiers.PermanentIdRequestAutoBeanFactory; import org.iplantc.de.client.models.identifiers.PermanentIdRequestDetails; @@ -60,7 +61,13 @@ public PermanentIdRequestPresenter(DiskResourceServiceFacade drsvc, @Override public void fetchMetadata() { - view.fetchMetadata(selectedRequest.getFolder(), appearance, drsvc); + final Folder selectedFolder = selectedRequest.getFolder(); + if (selectedFolder != null) { + view.fetchMetadata(selectedFolder, appearance, drsvc); + } else { + final String errMessage = appearance.folderNotFound(selectedRequest.getOriginalPath()); + IplantAnnouncer.getInstance().schedule(new ErrorAnnouncementConfig(errMessage)); + } } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/FolderPathProvider.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/FolderPathProvider.java new file mode 100644 index 000000000..48cf31510 --- /dev/null +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/FolderPathProvider.java @@ -0,0 +1,38 @@ +package org.iplantc.de.admin.desktop.client.permIdRequest.views; + +import org.iplantc.de.client.models.identifiers.PermanentIdRequest; + +import com.sencha.gxt.core.client.ValueProvider; + +/** + * @author psarando + */ +public class FolderPathProvider implements ValueProvider { + + private final PermanentIdRequestView.PermanentIdRequestViewAppearance appearance; + + public FolderPathProvider(PermanentIdRequestView.PermanentIdRequestViewAppearance appearance) { + this.appearance = appearance; + } + + @Override + public String getValue(PermanentIdRequest request) { + if (request == null || request.getFolder() == null) { + return appearance.folderNotFound(); + } + + return request.getFolder().getPath(); + } + + @Override + public void 
setValue(PermanentIdRequest request, String value) { + if (request != null && request.getFolder() != null) { + request.getFolder().setPath(value); + } + } + + @Override + public String getPath() { + return "Folder.path"; + } +} diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestProperties.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestProperties.java index bb78ce5b6..a87cef9b0 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestProperties.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestProperties.java @@ -3,8 +3,6 @@ import org.iplantc.de.client.models.identifiers.PermanentIdRequest; import org.iplantc.de.client.models.identifiers.PermanentIdRequestType; -import com.google.gwt.editor.client.Editor.Path; - import com.sencha.gxt.core.client.ValueProvider; import com.sencha.gxt.data.shared.ModelKeyProvider; import com.sencha.gxt.data.shared.PropertyAccess; @@ -28,9 +26,6 @@ public interface PermanentIdRequestProperties extends PropertyAccess dateUpdated(); - @Path("Folder.path") - ValueProvider path(); - ValueProvider type(); ValueProvider status(); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java index 8a408cbce..c0f2c0199 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestView.java @@ -75,6 +75,8 @@ public interface PermanentIdRequestViewAppearance { String request(); String userEmail(); + + String folderNotFound(); } public interface Presenter { @@ -103,6 +105,8 @@ public interface PermanentIdRequestPresenterAppearance { String createPermIdFailure(); + String folderNotFound(String path); + String metadataSaveError(); String requestLoadFailure(); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java index 9e637262b..acabbd464 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java @@ -154,7 +154,7 @@ ColumnModel createColumnModel() { ColumnConfig nameCol = new ColumnConfig<>(pr_props.requestedBy(), appearance.nameColumnWidth(), appearance.nameColumnLabel()); - ColumnConfig pathCol = new ColumnConfig<>(pr_props.path(), + ColumnConfig pathCol = new ColumnConfig<>(new FolderPathProvider(appearance), appearance.pathColumnWidth(), appearance.pathColumnLabel()); ColumnConfig dateSubCol = new ColumnConfig<>(pr_props.dateSubmitted(), diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequest.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequest.java index 103dbe280..d24d5932c 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequest.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/identifiers/PermanentIdRequest.java @@ -17,6 
+17,9 @@ public interface PermanentIdRequest extends HasId { Folder getFolder(); + @PropertyName("original_path") + String getOriginalPath(); + @PropertyName("requested_by") String getRequestedBy(); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.java index f3050b207..edec47941 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.java @@ -54,4 +54,6 @@ public interface PermIdRequestDisplayStrings extends Messages { String statusUpdateSuccess(); String userEmail(); + + String folderNotFound(); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.properties b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.properties index 65176f041..c0e9b9e8f 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.properties +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermIdRequestDisplayStrings.properties @@ -24,3 +24,4 @@ requestLoadFailure = Unable to load permanentId requests! statusUpdateFailure = Unable to update the status of this request! statusUpdateSuccess = Request updated! userEmail = Email +folderNotFound = Folder Not Found diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestPresenterDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestPresenterDefaultAppearance.java index 63329d0a4..cd0de700d 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestPresenterDefaultAppearance.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestPresenterDefaultAppearance.java @@ -1,6 +1,7 @@ package org.iplantc.de.theme.base.client.admin.permIdRequest; import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.PermanentIdRequestPresenterAppearance; +import org.iplantc.de.resources.client.messages.IplantErrorStrings; import com.google.gwt.core.client.GWT; @@ -8,13 +9,16 @@ public class PermanentIdRequestPresenterDefaultAppearance implements PermanentIdRequestPresenterAppearance { private final PermIdRequestDisplayStrings displayStrings; + private final IplantErrorStrings errorStrings; public PermanentIdRequestPresenterDefaultAppearance() { - this(GWT. 
create(PermIdRequestDisplayStrings.class)); + this(GWT.create(PermIdRequestDisplayStrings.class), + GWT.create(IplantErrorStrings.class)); } - public PermanentIdRequestPresenterDefaultAppearance(PermIdRequestDisplayStrings displayStrings) { + public PermanentIdRequestPresenterDefaultAppearance(PermIdRequestDisplayStrings displayStrings, IplantErrorStrings errorStrings) { this.displayStrings = displayStrings; + this.errorStrings = errorStrings; } @Override @@ -27,6 +31,11 @@ public String createPermIdFailure() { return displayStrings.createPermIdFailure(); } + @Override + public String folderNotFound(String path) { + return errorStrings.folderNotFound(path); + } + @Override public String metadataSaveError() { return displayStrings.metadataSaveError(); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java index 71888ff97..11804f037 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/theme/base/client/admin/permIdRequest/PermanentIdRequestViewDefaultAppearance.java @@ -133,4 +133,8 @@ public String userEmail() { return displayStrings.userEmail(); } + @Override + public String folderNotFound() { + return displayStrings.folderNotFound(); + } } From 86c45d2f7c663db851aa3aafaac95fc00b5daba4 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 11 Mar 2016 16:49:09 -0700 Subject: [PATCH 141/183] CORE-7558: implemented the POST /analyses/sharing endpoint in apps --- services/apps/src/apps/clients/data_info.clj | 49 ++++++-- .../apps/src/apps/clients/iplant_groups.clj | 69 ++++++++--- .../apps/src/apps/clients/notifications.clj | 7 ++ .../clients/notifications/app_sharing.clj | 48 +------- .../clients/notifications/common_sharing.clj | 51 ++++++++ .../clients/notifications/job_sharing.clj | 54 +++++++++ services/apps/src/apps/protocols.clj | 1 + services/apps/src/apps/routes/analyses.clj | 21 +++- .../src/apps/routes/domain/permission.clj | 31 ++++- services/apps/src/apps/service/apps.clj | 8 ++ services/apps/src/apps/service/apps/agave.clj | 5 + .../apps/src/apps/service/apps/combined.clj | 3 + services/apps/src/apps/service/apps/de.clj | 4 + .../src/apps/service/apps/de/permissions.clj | 5 + services/apps/src/apps/service/apps/jobs.clj | 55 ++------- .../apps/service/apps/jobs/permissions.clj | 45 ++++++++ .../src/apps/service/apps/jobs/sharing.clj | 109 ++++++++++++++++++ .../apps/src/apps/service/apps/jobs/util.clj | 9 ++ 18 files changed, 459 insertions(+), 115 deletions(-) create mode 100644 services/apps/src/apps/clients/notifications/common_sharing.clj create mode 100644 services/apps/src/apps/clients/notifications/job_sharing.clj create mode 100644 services/apps/src/apps/service/apps/jobs/permissions.clj create mode 100644 services/apps/src/apps/service/apps/jobs/sharing.clj create mode 100644 services/apps/src/apps/service/apps/jobs/util.clj diff --git a/services/apps/src/apps/clients/data_info.clj b/services/apps/src/apps/clients/data_info.clj index 92b0c5051..4c9688ab1 100644 --- a/services/apps/src/apps/clients/data_info.clj +++ b/services/apps/src/apps/clients/data_info.clj @@ -1,4 +1,5 @@ (ns apps.clients.data-info + (:use [medley.core :only [map-kv]]) (:require [cemerick.url :as curl] [cheshire.core :as cheshire] [clj-http.client :as http] @@ -19,22 
+20,32 @@ (defn get-file-stats [user paths] (when (seq paths) - ((comp service/parse-json :body) + (:body (http/post (data-info-url "stat-gatherer") {:query-params (secured-params user) :body (cheshire/encode {:paths paths}) :content-type :json - :as :stream})))) + :as :json})))) + +(defn get-data-ids + [user paths] + (->> (get-file-stats user paths) + :paths + (map-kv (fn [k v] [k (:id v)])))) + +(defn get-data-id + [user path] + ((keyword path) (get-data-ids user [path]))) (defn get-paths-exist [user paths] (when (seq paths) - ((comp service/parse-json :body) - (http/post (data-info-url "existence-marker") - {:query-params (secured-params user) - :body (cheshire/encode {:paths paths}) - :content-type :json - :as :stream})))) + (:body + (http/post (data-info-url "existence-marker") + {:query-params (secured-params user) + :body (cheshire/encode {:paths paths}) + :content-type :json + :as :json})))) (defn get-file-contents [user path] @@ -57,3 +68,25 @@ :body (cheshire/encode {:paths [path]}) :content-type :json :as :stream})) + +(defn share-data-item + [user data-id share-with permission] + (:body + (http/put (data-info-url "data" data-id "permissions" share-with permission) + {:query-params (secured-params user) + :as :json}))) + +(defn share-path + [user path share-with permission] + (share-data-item user (get-data-id user path) share-with permission)) + +(defn unshare-data-item + [user data-id unshare-with] + (:body + (http/delete (data-info-url "data" data-id "permissions" unshare-with) + {:query-params (secured-params user) + :as :json}))) + +(defn unshare-path + [user path unshare-with] + (unshare-data-item user (get-data-id user path) unshare-with)) diff --git a/services/apps/src/apps/clients/iplant_groups.clj b/services/apps/src/apps/clients/iplant_groups.clj index ef20ddfca..ecb3a6ced 100644 --- a/services/apps/src/apps/clients/iplant_groups.clj +++ b/services/apps/src/apps/clients/iplant_groups.clj @@ -138,6 +138,13 @@ [app-ids] (group-permissions (retrieve-app-permissions nil app-ids))) +(defn load-analysis-permissions + "Loads analysis permissions for a user from Grouper." + ([user] + (load-analysis-permissions user nil)) + ([user analysis-ids] + (group-permissions (retrieve-analysis-permissions user analysis-ids)))) + (defn list-analysis-permissions "Loads an analysis permission listing from Grouper." [analysis-ids] @@ -214,27 +221,33 @@ :content-type :json :as :json}))) +(defn- share-resource + "Shares a resource with a user." + [resource-name role-name subject-id level] + (http/put (grouper-url "attributes" resource-name "permissions" "memberships" role-name subject-id level) + {:query-params {:user grouper-user} + :form-params {:allowed true} + :content-type :json + :as :json}) + nil) + +(defn- unshare-resource + "Unshares a resource with a user." + [resource-name role-name subject-id] + (http/delete (grouper-url "attributes" resource-name "permissions" "memberships" role-name subject-id) + {:query-params {:user grouper-user} + :as :json}) + nil) + (defn- share-app* "Shares an app with a user." [app-id subject-id level] - (let [resource-name (grouper-app-resource-name app-id) - role-name (grouper-user-group)] - (http/put (grouper-url "attributes" resource-name "permissions" "memberships" role-name subject-id level) - {:query-params {:user grouper-user} - :form-params {:allowed true} - :content-type :json - :as :json})) - nil) + (share-resource (grouper-app-resource-name app-id) (grouper-user-group) subject-id level)) (defn- unshare-app* "Unshares an app with a user." 
[app-id subject-id]
- (let [resource-name (grouper-app-resource-name app-id)
- role-name (grouper-user-group)]
- (http/delete (grouper-url "attributes" resource-name "permissions" "memberships" role-name subject-id)
- {:query-params {:user grouper-user}
- :as :json}))
- nil)
+ (unshare-resource (grouper-app-resource-name app-id) (grouper-user-group) subject-id))

(defn- get-error-reason
"Attempts to extract the reason for an error from an iplant-groups response body."
@@ -270,3 +283,31 @@
(let [analysis-resource-name (grouper-analysis-resource-name analysis-id)]
(create-resource analysis-resource-name (grouper-analysis-permission-def))
(grant-role-user-permission user (grouper-user-group) analysis-resource-name "own")))
+
+(defn- share-analysis*
+ [analysis-id subject-id level]
+ (share-resource (grouper-analysis-resource-name analysis-id) (grouper-user-group) subject-id level))
+
+(defn- unshare-analysis*
+ [analysis-id subject-id]
+ (unshare-resource (grouper-analysis-resource-name analysis-id) (grouper-user-group) subject-id))
+
+(defn share-analysis
+ "Shares an analysis with a user."
+ [analysis-id subject-id level]
+ (try+
+ (share-analysis* analysis-id subject-id level)
+ (catch clj-http-error? {:keys [status body]}
+ (let [reason (get-error-reason body status)]
+ (log/error (str "unable to share " analysis-id " with " subject-id ": " reason)))
+ "the analysis sharing request failed")))
+
+(defn unshare-analysis
+ "Unshares an analysis with a user."
+ [analysis-id subject-id]
+ (try+
+ (unshare-analysis* analysis-id subject-id)
+ (catch clj-http-error? {:keys [status body]}
+ (let [reason (get-error-reason body status)]
+ (log/error (str "unable to unshare " analysis-id " with " subject-id ": " reason)))
+ "the analysis unsharing request failed")))
diff --git a/services/apps/src/apps/clients/notifications.clj b/services/apps/src/apps/clients/notifications.clj
index 1f2835709..40251c5e4 100644
--- a/services/apps/src/apps/clients/notifications.clj
+++ b/services/apps/src/apps/clients/notifications.clj
@@ -7,6 +7,7 @@
[clojure.string :as string]
[clojure.tools.logging :as log]
[apps.clients.notifications.app-sharing :as asn]
+ [apps.clients.notifications.job-sharing :as jsn]
[apps.persistence.tool-requests :as tp]
[apps.util.config :as config]))

@@ -126,3 +127,9 @@
(->> (asn/format-unsharing-notifications sharer sharee responses)
(map guarded-send-notification)
dorun))
+
+(defn send-analysis-sharing-notifications
+ [sharer sharee responses]
+ (->> (jsn/format-sharing-notifications sharer sharee responses)
+ (map guarded-send-notification)
+ dorun))
diff --git a/services/apps/src/apps/clients/notifications/app_sharing.clj b/services/apps/src/apps/clients/notifications/app_sharing.clj
index b9a72ed99..397f529a4 100644
--- a/services/apps/src/apps/clients/notifications/app_sharing.clj
+++ b/services/apps/src/apps/clients/notifications/app_sharing.clj
@@ -1,49 +1,11 @@
(ns apps.clients.notifications.app-sharing
- (:use [clostache.parser :only [render]]
+ (:use [apps.clients.notifications.common-sharing]
[medley.core :only [remove-vals]])
(:require [clojure.string :as string]))

-(def grouping-threshold 10)
-
(def notification-type "apps")

-(def share-action "share")
-(def unshare-action "unshare")
-
-(def sharer-success-formats
- {:grouped {:singular "{{count}} app has been {{action}}d with {{sharee}}."
- :plural "{{count}} apps have been {{action}}d with {{sharee}}."} - :ungrouped {:singular "The following app has been {{action}}d with {{sharee}}: {{apps}}" - :plural "The following apps have been {{action}}d with {{sharee}}: {{apps}}"}}) - -(def sharee-success-formats - {:grouped {:singular "{{sharer}} has {{action}}d {{count}} app with you." - :plural "{{sharer}} has {{action}}d {{count}} apps with you."} - :ungrouped {:singular "{{sharer}} has {{action}}d the following app with you: {{apps}}" - :plural "{{sharer}} has {{action}}d the following apps with you: {{apps}}"}}) - -(def failure-formats - {:grouped {:singular "{{count}} app could not be {{action}}d with {{sharee}}." - :plural "{{count}} apps could not be {{action}}d with {{sharee}}."} - :ungrouped {:singular "The following app could not be {{action}}d with {{sharee}}: {{apps}}" - :plural "The following apps could not be {{action}}d with {{sharee}}: {{apps}}"}}) - -(defn- format-numbered-string - [formats action sharer sharee response-desc response-count] - (let [fmt (formats (if (= response-count 1) :singular :plural))] - (render fmt {:action action - :sharer sharer - :sharee sharee - :apps response-desc - :count response-count}))) - -(defn- format-subject - [formats action sharer sharee response-desc response-count] - (format-numbered-string (:grouped formats) action sharer sharee response-desc response-count)) - -(defn- format-message - [formats action sharer sharee response-desc response-count] - (let [formats (formats (if (< response-count grouping-threshold) :ungrouped :grouped))] - (format-numbered-string formats action sharer sharee response-desc response-count))) +(def singular "app") +(def plural "apps") (defn- format-app [category-keyword response] @@ -62,8 +24,8 @@ response-count (count responses)] {:type notification-type :user recipient - :subject (format-subject formats action sharer sharee response-desc response-count) - :message (format-message formats action sharer sharee response-desc response-count) + :subject (format-subject formats singular plural action sharer sharee response-desc response-count) + :message (format-message formats singular plural action sharer sharee response-desc response-count) :payload (format-payload category-keyword action responses)}))) (defn- format-sharer-notification diff --git a/services/apps/src/apps/clients/notifications/common_sharing.clj b/services/apps/src/apps/clients/notifications/common_sharing.clj new file mode 100644 index 000000000..9486c7ffb --- /dev/null +++ b/services/apps/src/apps/clients/notifications/common_sharing.clj @@ -0,0 +1,51 @@ +(ns apps.clients.notifications.common-sharing + (:use [clostache.parser :only [render]])) + +(def grouping-threshold 10) + +(def share-action "share") +(def unshare-action "unshare") + +(def sharer-success-formats + {:grouped + {:singular "{{count}} {{singular}} has been {{action}}d with {{sharee}}." + :plural "{{count}} {{plural}} have been {{action}}d with {{sharee}}."} + :ungrouped + {:singular "The following {{singular}} has been {{action}}d with {{sharee}}: {{items}}" + :plural "The following {{plural}} have been {{action}}d with {{sharee}}: {{items}}"}}) + +(def sharee-success-formats + {:grouped + {:singular "{{sharer}} has {{action}}d {{count}} {{singular}} with you." 
+ :plural "{{sharer}} has {{action}}d {{count}} {{plural}} with you."} + :ungrouped + {:singular "{{sharer}} has {{action}}d the following {{singular}} with you: {{items}}" + :plural "{{sharer}} has {{action}}d the following {{plural}} with you: {{items}}"}}) + +(def failure-formats + {:grouped + {:singular "{{count}} {{singular}} could not be {{action}}d with {{sharee}}." + :plural "{{count}} {{plural}} could not be {{action}}d with {{sharee}}."} + :ungrouped + {:singular "The following {{singular}} could not be {{action}}d with {{sharee}}: {{items}}" + :plural "The following {{plural}} could not be {{action}}d with {{sharee}}: {{items}}"}}) + +(defn- format-numbered-string + [formats singular plural action sharer sharee response-desc response-count] + (let [fmt (formats (if (= response-count 1) :singular :plural))] + (render fmt {:singular singular + :plural plural + :action action + :sharer sharer + :sharee sharee + :items response-desc + :count response-count}))) + +(defn format-subject + [formats singular plural action sharer sharee response-desc response-count] + (format-numbered-string (:grouped formats) singular plural action sharer sharee response-desc response-count)) + +(defn format-message + [formats singular plural action sharer sharee response-desc response-count] + (let [formats (formats (if (< response-count grouping-threshold) :ungrouped :grouped))] + (format-numbered-string formats singular plural action sharer sharee response-desc response-count))) diff --git a/services/apps/src/apps/clients/notifications/job_sharing.clj b/services/apps/src/apps/clients/notifications/job_sharing.clj new file mode 100644 index 000000000..169a22478 --- /dev/null +++ b/services/apps/src/apps/clients/notifications/job_sharing.clj @@ -0,0 +1,54 @@ +(ns apps.clients.notifications.job-sharing + (:use [apps.clients.notifications.common-sharing] + [medley.core :only [remove-vals]]) + (:require [clojure.string :as string])) + +(def notification-type "analyses") +(def singular "analysis") +(def plural "analyses") + +(defn- format-analysis + [category-keyword response] + (remove-vals nil? (assoc (select-keys response [:analysis_id :analysis_name]) + :category_id (str (category-keyword response))))) + +(defn- format-payload + [category-keyword action responses] + {:action action + :analyses (map (partial format-analysis category-keyword) responses)}) + +(defn- format-notification + [category-keyword recipient formats action sharer sharee responses] + (when (seq responses) + (let [response-desc (string/join ", " (map :analysis_name responses)) + response-count (count responses)] + {:type notification-type + :user recipient + :subject (format-subject formats singular plural action sharer sharee response-desc response-count) + :message (format-message formats singular plural action sharer sharee response-desc response-count) + :payload (format-payload category-keyword action responses)}))) + +(defn- format-sharer-notification + [formats action sharer sharee responses] + (format-notification :sharer_category sharer formats action sharer sharee responses)) + +(defn- format-sharee-notification + [formats action sharer sharee responses] + (format-notification :sharee_category sharee formats action sharer sharee responses)) + +(defn format-sharing-notifications + "Formats sharing notifications for analyses." + [sharer sharee responses] + (let [responses (group-by :success responses)] + (remove nil? 
+ [(format-sharer-notification sharer-success-formats share-action sharer sharee (responses true)) + (format-sharee-notification sharee-success-formats share-action sharer sharee (responses true)) + (format-sharer-notification failure-formats share-action sharer sharee (responses false))]))) + +(defn format-unsharing-notifications + "Formats unsharing notifications for analyses." + [sharer sharee responses] + (let [responses (group-by :success responses)] + (remove nil? + [(format-sharer-notification sharer-success-formats unshare-action sharer sharee (responses true)) + (format-sharer-notification failure-formats unshare-action sharer sharee (responses false))]))) diff --git a/services/apps/src/apps/protocols.clj b/services/apps/src/apps/protocols.clj index 95e36e63c..6b44cd826 100644 --- a/services/apps/src/apps/protocols.clj +++ b/services/apps/src/apps/protocols.clj @@ -68,4 +68,5 @@ (unshareApps [_ unsharing-requests]) (unshareAppsWithUser [_ app-names sharee app-ids]) (unshareAppWithUser [_ app-names sharee app-id]) + (hasAppPermission [_ username app-id required-level]) (supportsJobSharing [_ _])) diff --git a/services/apps/src/apps/routes/analyses.clj b/services/apps/src/apps/routes/analyses.clj index e352f07cd..f14c7e12a 100644 --- a/services/apps/src/apps/routes/analyses.clj +++ b/services/apps/src/apps/routes/analyses.clj @@ -45,11 +45,26 @@ :return perms/AnalysisPermissionListing :summary "List App Permissions" :description "This endpoint allows the caller to list the permissions for one or more analyses. - The authenticated user must have ownership permission on every analysis in the request body for - this endpoint to succeed." + The authenticated user must have read permission on every analysis in the request body for this + endpoint to succeed." (ok (apps/list-job-permissions current-user (:analyses body)))) - (PATCH* "/:analysis-id" [] +(POST* "/sharing" [] + :query [params SecuredQueryParams] + :body [body (describe perms/AnalysisSharingRequest "The app sharing request.")] + :return perms/AnalysisSharingResponse + :summary "Add App Permissions" + :description "This endpoint allows the caller to share multiple analyses with multiple users. The + authenticated user must have ownership permission to every analysis in the request body for this + endpoint to fully succeed. Note: this is a potentially slow operation and the response is returned + synchronously. The DE UI handles this by allowing the user to continue working while the request is + being processed. When calling this endpoint, please be sure that the response timeout is long + enough. Using a response timeout that is too short will result in an exception on the client side. + On the server side, the result of the sharing operation when a connection is lost is undefined. It + may be worthwhile to repeat failed or timed out calls to this endpoint." 
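+ ;; Illustrative request body for this endpoint (the user and analysis ID below are
+ ;; hypothetical), matching the AnalysisSharingRequest schema:
+ ;;   {"sharing": [{"user": "alice",
+ ;;                 "analyses": [{"analysis_id": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ ;;                               "permission": "read"}]}]}
+ ;; The response echoes each element with an added "analysis_name" and "success" flag,
+ ;; plus an "error" object when "success" is false.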
+ (ok (apps/share-jobs current-user (:sharing body)))) + +(PATCH* "/:analysis-id" [] :path-params [analysis-id :- AnalysisIdPathParam] :query [params SecuredQueryParams] :body [body AnalysisUpdate] diff --git a/services/apps/src/apps/routes/domain/permission.clj b/services/apps/src/apps/routes/domain/permission.clj index 3c5889953..bb1584156 100644 --- a/services/apps/src/apps/routes/domain/permission.clj +++ b/services/apps/src/apps/routes/domain/permission.clj @@ -3,14 +3,15 @@ [schema.core :only [defschema optional-key enum]]) (:import [java.util UUID])) -(def PermissionEnum (enum "read" "write" "own" "")) +(def AppPermissionEnum (enum "read" "write" "own" "")) +(def AnalysisPermissionEnum (enum "read" "own" "")) (defschema AppIdList {:apps (describe [NonBlankString] "A List of app IDs")}) (defschema UserPermissionListElement {:user (describe NonBlankString "The user ID") - :permission (describe PermissionEnum "The permission level assigned to the user")}) + :permission (describe AppPermissionEnum "The permission level assigned to the user")}) (defschema AppPermissionListElement {:id (describe NonBlankString "The app ID") @@ -22,7 +23,7 @@ (defschema AppSharingRequestElement {:app_id (describe NonBlankString "The app ID") - :permission (describe PermissionEnum "The requested permission level")}) + :permission (describe AppPermissionEnum "The requested permission level")}) (defschema AppSharingResponseElement (assoc AppSharingRequestElement @@ -74,3 +75,27 @@ (defschema AnalysisPermissionListing {:analyses (describe [AnalysisPermissionListElement] "The list of analysis permissions")}) + +(defschema AnalysisSharingRequestElement + {:analysis_id (describe UUID "The analysis ID") + :permission (describe AnalysisPermissionEnum "The requested permission level")}) + +(defschema AnalysisSharingResponseElement + (assoc AnalysisSharingRequestElement + :analysis_name (describe NonBlankString "The analysis name") + :success (describe Boolean "A Boolean flag indicating whether the sharing request succeeded") + (optional-key :error) (describe ErrorResponse "Information about any error that may have occurred"))) + +(defschema UserAnalysisSharingRequestElement + {:user (describe NonBlankString "The user ID") + :analyses (describe [AnalysisSharingRequestElement] "The list of sharing requests for individual analyses")}) + +(defschema UserAnalysisSharingResponseElement + (assoc UserAnalysisSharingRequestElement + :analyses (describe [AnalysisSharingResponseElement] "The list of analysis sharing responses for the user"))) + +(defschema AnalysisSharingRequest + {:sharing (describe [UserAnalysisSharingRequestElement] "The list of sharing requests for individual users.")}) + +(defschema AnalysisSharingResponse + {:sharing (describe [UserAnalysisSharingResponseElement] "The list of sharing responses for individual users.")}) diff --git a/services/apps/src/apps/service/apps.clj b/services/apps/src/apps/service/apps.clj index ebc973166..ccec4fafe 100644 --- a/services/apps/src/apps/service/apps.clj +++ b/services/apps/src/apps/service/apps.clj @@ -360,3 +360,11 @@ (defn list-job-permissions [user job-ids] (jobs/list-job-permissions (get-apps-client user) user job-ids)) + +(defn share-jobs + [user sharing-requests] + {:sharing (jobs/share-jobs (get-apps-client user) user sharing-requests)}) + +(defn can-access-app + [user username app-id required-level] + (.hasAppPermission (get-apps-client user) username app-id required-level)) diff --git a/services/apps/src/apps/service/apps/agave.clj 
b/services/apps/src/apps/service/apps/agave.clj index 22f0e3c6f..9ca32d638 100644 --- a/services/apps/src/apps/service/apps/agave.clj +++ b/services/apps/src/apps/service/apps/agave.clj @@ -171,5 +171,10 @@ (let [category (.hpcAppGroup agave)] (app-permissions/app-unsharing-failure app-names app-id category app-permission-rejection)))) + (hasAppPermission [_ username app-id required-level] + (when (and (user-has-access-token?) + (not (util/uuid? app-id))) + false)) + (supportsJobSharing [_ _] false)) diff --git a/services/apps/src/apps/service/apps/combined.clj b/services/apps/src/apps/service/apps/combined.clj index a1cf0b9f4..6532b9ad3 100644 --- a/services/apps/src/apps/service/apps/combined.clj +++ b/services/apps/src/apps/service/apps/combined.clj @@ -233,5 +233,8 @@ (or (first (remove nil? (map #(.unshareAppWithUser % app-names sharee app-id) clients))) (app-permissions/app-unsharing-failure app-names app-id nil (str "app ID " app-id " does not exist")))) + (hasAppPermission [_ username app-id required-level] + (first (remove nil? (map #(.hasAppPermission % username app-id required-level) clients)))) + (supportsJobSharing [_ job-step] (.supportsJobSharing (util/apps-client-for-job-step clients job-step) job-step))) diff --git a/services/apps/src/apps/service/apps/de.clj b/services/apps/src/apps/service/apps/de.clj index bdc6ca692..38156e3d5 100644 --- a/services/apps/src/apps/service/apps/de.clj +++ b/services/apps/src/apps/service/apps/de.clj @@ -256,5 +256,9 @@ (partial app-permissions/app-unsharing-success app-names app-id) (partial app-permissions/app-unsharing-failure app-names app-id)))) + (hasAppPermission [_ username app-id required-level] + (when (util/uuid? app-id) + (perms/has-app-permission username (uuidify app-id) required-level))) + (supportsJobSharing [_ _] true)) diff --git a/services/apps/src/apps/service/apps/de/permissions.clj b/services/apps/src/apps/service/apps/de/permissions.clj index 806ce2058..ada0fbaac 100644 --- a/services/apps/src/apps/service/apps/de/permissions.clj +++ b/services/apps/src/apps/service/apps/de/permissions.clj @@ -35,3 +35,8 @@ (let [app-perms (iplant-groups/list-app-permissions app-ids) app-names (amp/get-app-names app-ids)] (map (partial format-app-permissions user app-perms app-names) app-ids))) + +(defn has-app-permission + [user app-id required-level] + (-> (iplant-groups/load-app-permissions user [app-id]) + (iplant-groups/has-permission-level required-level app-id))) diff --git a/services/apps/src/apps/service/apps/jobs.clj b/services/apps/src/apps/service/apps/jobs.clj index 64b883f32..7abe9cfc4 100644 --- a/services/apps/src/apps/service/apps/jobs.clj +++ b/services/apps/src/apps/service/apps/jobs.clj @@ -3,6 +3,7 @@ [slingshot.slingshot :only [try+]]) (:require [clojure.tools.logging :as log] [clojure.string :as string] + [clojure-commons.error-codes :as ce] [clojure-commons.exception-util :as cxu] [kameleon.db :as db] [apps.clients.iplant-groups :as iplant-groups] @@ -10,7 +11,10 @@ [apps.persistence.jobs :as jp] [apps.service.apps.job-listings :as listings] [apps.service.apps.jobs.params :as job-params] + [apps.service.apps.jobs.permissions :as job-permissions] + [apps.service.apps.jobs.sharing :as job-sharing] [apps.service.apps.jobs.submissions :as submissions] + [apps.service.apps.jobs.util :as ju] [apps.util.service :as service])) (defn supports-job-type @@ -109,12 +113,6 @@ (sync-incomplete-job-status apps-client job step) (sync-complete-job-status job))) -(defn- validate-job-existence - [job-ids] - (let [missing-ids 
(jp/list-non-existent-job-ids (set job-ids))] - (when-not (empty? missing-ids) - (service/not-found "jobs" job-ids)))) - (defn validate-job-ownership [username job-ids] (let [unowned-ids (map :id (jp/list-unowned-jobs username job-ids))] @@ -123,7 +121,7 @@ (defn- validate-jobs-for-user [username job-ids] - (validate-job-existence job-ids) + (ju/validate-job-existence job-ids) (validate-job-ownership username job-ids)) (defn update-job @@ -189,41 +187,10 @@ (iplant-groups/register-analysis (:shortUsername user) (:id job-info)) job-info))) -(defn- validate-job-permission-level - [short-username perms required-level job-ids] - (doseq [job-id job-ids] - (let [user-perms (filter (comp (partial = short-username) :id :subject) (perms job-id))] - (when (iplant-groups/lacks-permission-level {job-id user-perms} required-level job-id) - (cxu/forbidden (str "insufficient privileges for analysis " job-id)))))) - -(defn- validate-job-sharing-support - [apps-client job-ids] - (doseq [job-id job-ids - job-step (jp/list-job-steps job-id)] - (when-not (.supportsJobSharing apps-client job-step) - (cxu/bad-request (str "analysis sharing not supported for " job-id))))) - -(defn- validate-jobs-for-permissions - [apps-client {short-username :shortUsername} perms required-level job-ids] - (validate-job-existence job-ids) - (validate-job-permission-level short-username perms required-level job-ids) - (validate-job-sharing-support apps-client job-ids)) - -(defn- format-job-permission - [short-username perms {:keys [id job-name]}] - {:id id - :name job-name - :permissions (mapv iplant-groups/format-permission - (remove (comp (partial = short-username) key) - (group-by (comp :id :subject) (perms id))))}) - -(defn- format-job-permission-listing - [{short-username :shortUsername} perms jobs] - {:analyses (mapv (partial format-job-permission short-username perms) jobs)}) - (defn list-job-permissions - [apps-client {:keys [username] :as user} job-ids] - (let [perms (iplant-groups/list-analysis-permissions job-ids)] - (transaction - (validate-jobs-for-permissions apps-client user perms "read" job-ids) - (format-job-permission-listing user perms (jp/list-jobs-by-id job-ids))))) + [apps-client user job-ids] + (job-permissions/list-job-permissions apps-client user job-ids)) + +(defn share-jobs + [apps-client user sharing-requests] + (job-sharing/share-jobs apps-client user sharing-requests)) diff --git a/services/apps/src/apps/service/apps/jobs/permissions.clj b/services/apps/src/apps/service/apps/jobs/permissions.clj new file mode 100644 index 000000000..14268e70e --- /dev/null +++ b/services/apps/src/apps/service/apps/jobs/permissions.clj @@ -0,0 +1,45 @@ +(ns apps.service.apps.jobs.permissions + (:use [korma.db :only [transaction]]) + (:require [apps.clients.iplant-groups :as iplant-groups] + [apps.persistence.jobs :as jp] + [apps.service.apps.jobs.util :as ju] + [clojure-commons.exception-util :as cxu])) + +(defn- validate-job-permission-level + [short-username perms required-level job-ids] + (doseq [job-id job-ids] + (let [user-perms (filter (comp (partial = short-username) :id :subject) (perms job-id))] + (when (iplant-groups/lacks-permission-level {job-id user-perms} required-level job-id) + (cxu/forbidden (str "insufficient privileges for analysis " job-id)))))) + +(defn- validate-job-sharing-support + [apps-client job-ids] + (doseq [job-id job-ids + job-step (jp/list-job-steps job-id)] + (when-not (.supportsJobSharing apps-client job-step) + (cxu/bad-request (str "analysis sharing not supported for " job-id))))) + 
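+;; Permission validation happens in three steps: every job must exist, the user
+;; must hold the required permission level on each job, and every step of every
+;; job must support sharing. Each check aborts the request with an HTTP error.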
+(defn- validate-jobs-for-permissions + [apps-client {short-username :shortUsername} perms required-level job-ids] + (ju/validate-job-existence job-ids) + (validate-job-permission-level short-username perms required-level job-ids) + (validate-job-sharing-support apps-client job-ids)) + +(defn- format-job-permission + [short-username perms {:keys [id job-name]}] + {:id id + :name job-name + :permissions (mapv iplant-groups/format-permission + (remove (comp (partial = short-username) key) + (group-by (comp :id :subject) (perms id))))}) + +(defn- format-job-permission-listing + [{short-username :shortUsername} perms jobs] + {:analyses (mapv (partial format-job-permission short-username perms) jobs)}) + +(defn list-job-permissions + [apps-client {:keys [username] :as user} job-ids] + (let [perms (iplant-groups/list-analysis-permissions job-ids)] + (transaction + (validate-jobs-for-permissions apps-client user perms "read" job-ids) + (format-job-permission-listing user perms (jp/list-jobs-by-id job-ids))))) diff --git a/services/apps/src/apps/service/apps/jobs/sharing.clj b/services/apps/src/apps/service/apps/jobs/sharing.clj new file mode 100644 index 000000000..ff928832a --- /dev/null +++ b/services/apps/src/apps/service/apps/jobs/sharing.clj @@ -0,0 +1,109 @@ +(ns apps.service.apps.jobs.sharing + (:use [clostache.parser :only [render]] + [slingshot.slingshot :only [try+ throw+]]) + (:require [apps.clients.data-info :as data-info] + [apps.clients.iplant-groups :as iplant-groups] + [apps.clients.notifications :as cn] + [apps.persistence.jobs :as jp] + [apps.service.apps.jobs.permissions :as job-permissions] + [apps.util.service :as service] + [clojure-commons.error-codes :as ce])) + +(defn- get-job-name + [job-id {job-name :job_name}] + (or job-name (str "analysis ID " job-id))) + +(def job-sharing-formats + {:not-found "analysis ID {{analysis-id}} does not exist" + :load-failure "unable to load permissions for {{analysis-id}}: {{detail}}" + :not-allowed "insufficient privileges for analysis ID {{analysis-id}}"}) + +(defn- job-sharing-success + [job-id job level] + {:analysis_id job-id + :analysis_name (get-job-name job-id job) + :permission level + :success true}) + +(defn- job-sharing-failure + [job-id job level reason] + {:analysis_id job-id + :analysis_name (get-job-name job-id job) + :permission level + :success false + :error {:error_code ce/ERR_BAD_REQUEST + :reason reason}}) + +(defn- job-sharing-msg + ([reason-code job-id] + (job-sharing-msg reason-code job-id nil)) + ([reason-code job-id detail] + (render (job-sharing-formats reason-code) + {:analysis-id job-id + :detail (or detail "unexpected error")}))) + +(defn- load-analysis-permissions + [user analysis-id] + (try+ + (iplant-groups/load-analysis-permissions user [analysis-id]) + (catch ce/clj-http-error? 
{:keys [body]} + (throw+ {:type ::permission-load-failure + :reason (:grouper_result_message (service/parse-json body))})))) + +(defn- has-analysis-permission + [user job-id required-level] + (-> (iplant-groups/load-analysis-permissions user [job-id]) + (iplant-groups/has-permission-level required-level job-id))) + +(defn- share-app-for-job + [apps-client sharer sharee job-id {:keys [app-id]}] + (when-not (.hasAppPermission apps-client sharee app-id "read") + (let [response (.shareAppWithUser apps-client {} sharee app-id "read")] + (when-not (:success response) + (get-in response [:error :reason] "unable to share app"))))) + +(defn- share-output-folder + [sharer sharee {:keys [result-folder-path]}] + (try+ + (data-info/share-path sharer result-folder-path sharee "read") + nil + (catch ce/clj-http-error? {:keys [body]} + (str "unable to share result folder: " (:error_code (service/parse-json body)))))) + +(defn- do-job-sharing-steps + [apps-client sharer sharee job-id job level] + (or (share-app-for-job apps-client sharer sharee job-id job) + (share-output-folder sharer sharee job) + (iplant-groups/share-analysis job-id sharee level))) + +(defn- share-accessible-job + [apps-client sharer sharee job-id job level] + (if-let [failure-reason (do-job-sharing-steps apps-client sharer sharee job-id job level)] + (job-sharing-failure job-id job level failure-reason) + (job-sharing-success job-id job level))) + +(defn- share-extant-job + [apps-client sharer sharee job-id job level] + (if (has-analysis-permission (:shortUsername sharer) job-id "own") + (share-accessible-job apps-client sharer sharee job-id job level) + (job-sharing-failure job-id job level (job-sharing-msg :not-allowed job-id)))) + +(defn- share-job + [apps-client sharer sharee {job-id :analysis_id level :permission}] + (if-let [job (jp/get-job-by-id job-id)] + (try+ + (share-extant-job apps-client sharer sharee job-id job level) + (catch [:type ::permission-load-failure] {:keys [reason]} + (job-sharing-failure job-id job level (job-sharing-msg :load-failure job-id reason)))) + (job-sharing-failure job-id nil level (job-sharing-msg :not-found job-id)))) + +(defn- share-jobs-with-user + [apps-client sharer {sharee :user :keys [analyses]}] + (let [responses (mapv (partial share-job apps-client sharer sharee) analyses)] + (cn/send-analysis-sharing-notifications (:shortUsername sharer) sharee responses) + {:user sharee + :analyses responses})) + +(defn share-jobs + [apps-client user sharing-requests] + (mapv (partial share-jobs-with-user apps-client user) sharing-requests)) diff --git a/services/apps/src/apps/service/apps/jobs/util.clj b/services/apps/src/apps/service/apps/jobs/util.clj new file mode 100644 index 000000000..d0eb3a0b7 --- /dev/null +++ b/services/apps/src/apps/service/apps/jobs/util.clj @@ -0,0 +1,9 @@ +(ns apps.service.apps.jobs.util + (:require [apps.persistence.jobs :as jp] + [apps.util.service :as service])) + +(defn validate-job-existence + [job-ids] + (let [missing-ids (jp/list-non-existent-job-ids (set job-ids))] + (when-not (empty? 
missing-ids) + (service/not-found "jobs" job-ids)))) From f345cbd45be8843711d4eed1b4f9f6a5a27e9cb1 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 11 Mar 2016 17:01:49 -0700 Subject: [PATCH 142/183] CORE-7558: added the POST /analyses/sharing endpoint to terrain --- services/terrain/src/terrain/clients/apps/raw.clj | 9 +++++++++ services/terrain/src/terrain/routes/metadata.clj | 3 +++ 2 files changed, 12 insertions(+) diff --git a/services/terrain/src/terrain/clients/apps/raw.clj b/services/terrain/src/terrain/clients/apps/raw.clj index b7aefc4f5..61b3eedf1 100644 --- a/services/terrain/src/terrain/clients/apps/raw.clj +++ b/services/terrain/src/terrain/clients/apps/raw.clj @@ -262,6 +262,15 @@ :as :stream :follow-redirects false})) +(defn share-jobs + [body] + (client/post (apps-url "analyses" "sharing") + {:query-params (secured-params) + :content-type :json + :body body + :as :stream + :follow-redirects false})) + (defn submit-job [submission] (client/post (apps-url "analyses") diff --git a/services/terrain/src/terrain/routes/metadata.clj b/services/terrain/src/terrain/routes/metadata.clj index 8b38b69d8..7cc0d67cc 100644 --- a/services/terrain/src/terrain/routes/metadata.clj +++ b/services/terrain/src/terrain/routes/metadata.clj @@ -179,6 +179,9 @@ (POST "/analyses/permission-lister" [:as {:keys [body]}] (service/success-response (apps/list-job-permissions body))) + (POST "/analyses/sharing" [:as {:keys [body]}] + (service/success-response (apps/share-jobs body))) + (PATCH "/analyses/:analysis-id" [analysis-id :as {body :body}] (service/success-response (apps/update-job analysis-id body))) From 1e76e8cdf651849afc8edcc872f894b4e4348b66 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 11 Mar 2016 18:12:40 -0700 Subject: [PATCH 143/183] CORE-7559: implemented the POST /analyses/unsharing endpoint in apps --- .../apps/src/apps/clients/notifications.clj | 6 ++ services/apps/src/apps/routes/analyses.clj | 19 ++++-- .../src/apps/routes/domain/permission.clj | 28 ++++++++- services/apps/src/apps/service/apps.clj | 6 +- services/apps/src/apps/service/apps/jobs.clj | 4 ++ .../src/apps/service/apps/jobs/sharing.clj | 61 +++++++++++++++++++ 6 files changed, 115 insertions(+), 9 deletions(-) diff --git a/services/apps/src/apps/clients/notifications.clj b/services/apps/src/apps/clients/notifications.clj index 40251c5e4..326827cab 100644 --- a/services/apps/src/apps/clients/notifications.clj +++ b/services/apps/src/apps/clients/notifications.clj @@ -133,3 +133,9 @@ (->> (jsn/format-sharing-notifications sharer sharee responses) (map guarded-send-notification) dorun)) + +(defn send-analysis-unsharing-notifications + [sharer sharee responses] + (->> (jsn/format-unsharing-notifications sharer sharee responses) + (map guarded-send-notification) + dorun)) diff --git a/services/apps/src/apps/routes/analyses.clj b/services/apps/src/apps/routes/analyses.clj index f14c7e12a..ff83d9c58 100644 --- a/services/apps/src/apps/routes/analyses.clj +++ b/services/apps/src/apps/routes/analyses.clj @@ -49,11 +49,11 @@ endpoint to succeed." 
(ok (apps/list-job-permissions current-user (:analyses body))))

-(POST* "/sharing" []
+ (POST* "/sharing" []
:query [params SecuredQueryParams]
- :body [body (describe perms/AnalysisSharingRequest "The app sharing request.")]
+ :body [body (describe perms/AnalysisSharingRequest "The analysis sharing request.")]
:return perms/AnalysisSharingResponse
- :summary "Add App Permissions"
+ :summary "Add Analysis Permissions"
:description "This endpoint allows the caller to share multiple analyses with multiple users. The
authenticated user must have ownership permission to every analysis in the request body for this
endpoint to fully succeed. Note: this is a potentially slow operation and the response is returned
synchronously. The DE UI handles this by allowing the user to continue working while the request is
being processed. When calling this endpoint, please be sure that the response timeout is long
enough. Using a response timeout that is too short will result in an exception on the client side.
On the server side, the result of the sharing operation when a connection is lost is undefined. It
may be worthwhile to repeat failed or timed out calls to this endpoint."
(ok (apps/share-jobs current-user (:sharing body))))

+ (POST* "/unsharing" []
+ :query [params SecuredQueryParams]
+ :body [body (describe perms/AnalysisUnsharingRequest "The analysis unsharing request.")]
+ :return perms/AnalysisUnsharingResponse
+ :summary "Revoke Analysis Permissions"
+ :description "This endpoint allows the caller to revoke permission to access one or more analyses from
+ one or more users. The authenticated user must have ownership permission to every analysis in the request
+ body for this endpoint to fully succeed. Note: like analysis sharing, this is a potentially slow
+ operation."
+ (ok (apps/unshare-jobs current-user (:unsharing body))))
+
+ (PATCH* "/:analysis-id" []
:path-params [analysis-id :- AnalysisIdPathParam]
:query [params SecuredQueryParams]
:body [body AnalysisUpdate]
diff --git a/services/apps/src/apps/routes/domain/permission.clj b/services/apps/src/apps/routes/domain/permission.clj
index bb1584156..b71052266 100644
--- a/services/apps/src/apps/routes/domain/permission.clj
+++ b/services/apps/src/apps/routes/domain/permission.clj
@@ -95,7 +95,31 @@
:analyses (describe [AnalysisSharingResponseElement] "The list of analysis sharing responses for the user")))

(defschema AnalysisSharingRequest
- {:sharing (describe [UserAnalysisSharingRequestElement] "The list of sharing requests for individual users.")})
+ {:sharing (describe [UserAnalysisSharingRequestElement] "The list of sharing requests for individual users")})

(defschema AnalysisSharingResponse
- {:sharing (describe [UserAnalysisSharingResponseElement] "The list of sharing responses for individual users.")})
+ {:sharing (describe [UserAnalysisSharingResponseElement] "The list of sharing responses for individual users")})
+
+(defschema AnalysisUnsharingRequestElement
+ {:analysis_id (describe UUID "The analysis ID")
+ :permission (describe AnalysisPermissionEnum "The requested permission level")})
+
+(defschema AnalysisUnsharingResponseElement
+ {:analysis_id (describe UUID "The analysis ID")
+ :analysis_name (describe NonBlankString "The analysis name")
+ :success (describe Boolean "A Boolean flag indicating whether the unsharing request succeeded")
+ (optional-key :error) (describe ErrorResponse "Information about any error that may have occurred")})
+
+(defschema UserAnalysisUnsharingRequestElement
+ {:user (describe NonBlankString "The user ID")
+ :analyses (describe [UUID] "The identifiers of the analyses to unshare")})
+
+(defschema UserAnalysisUnsharingResponseElement
+ (assoc UserAnalysisUnsharingRequestElement
+ :analyses (describe [AnalysisUnsharingResponseElement] "The list of analysis unsharing responses for the user")))
+
+(defschema AnalysisUnsharingRequest
+ 
{:unsharing (describe [UserAnalysisUnsharingRequestElement] "The list of unsharing requests for individual users")}) + +(defschema AnalysisUnsharingResponse + {:unsharing (describe [UserAnalysisUnsharingResponseElement] "The list of unsharing responses for individual users")}) diff --git a/services/apps/src/apps/service/apps.clj b/services/apps/src/apps/service/apps.clj index ccec4fafe..88c080438 100644 --- a/services/apps/src/apps/service/apps.clj +++ b/services/apps/src/apps/service/apps.clj @@ -365,6 +365,6 @@ [user sharing-requests] {:sharing (jobs/share-jobs (get-apps-client user) user sharing-requests)}) -(defn can-access-app - [user username app-id required-level] - (.hasAppPermission (get-apps-client user) username app-id required-level)) +(defn unshare-jobs + [user unsharing-requests] + {:unsharing (jobs/unshare-jobs (get-apps-client user) user unsharing-requests)}) diff --git a/services/apps/src/apps/service/apps/jobs.clj b/services/apps/src/apps/service/apps/jobs.clj index 7abe9cfc4..e035e8139 100644 --- a/services/apps/src/apps/service/apps/jobs.clj +++ b/services/apps/src/apps/service/apps/jobs.clj @@ -194,3 +194,7 @@ (defn share-jobs [apps-client user sharing-requests] (job-sharing/share-jobs apps-client user sharing-requests)) + +(defn unshare-jobs + [apps-client user unsharing-requests] + (job-sharing/unshare-jobs apps-client user unsharing-requests)) diff --git a/services/apps/src/apps/service/apps/jobs/sharing.clj b/services/apps/src/apps/service/apps/jobs/sharing.clj index ff928832a..a5e041f7d 100644 --- a/services/apps/src/apps/service/apps/jobs/sharing.clj +++ b/services/apps/src/apps/service/apps/jobs/sharing.clj @@ -34,6 +34,20 @@ :error {:error_code ce/ERR_BAD_REQUEST :reason reason}}) +(defn- job-unsharing-success + [job-id job] + {:analysis_id job-id + :analysis_name (get-job-name job-id job) + :success true}) + +(defn- job-unsharing-failure + [job-id job reason] + {:analysis_id job-id + :analysis_name (get-job-name job-id job) + :success false + :error {:error_code ce/ERR_BAD_REQUEST + :reason reason}}) + (defn- job-sharing-msg ([reason-code job-id] (job-sharing-msg reason-code job-id nil)) @@ -107,3 +121,50 @@ (defn share-jobs [apps-client user sharing-requests] (mapv (partial share-jobs-with-user apps-client user) sharing-requests)) + +(defn- unshare-output-folder + [sharer sharee {:keys [result-folder-path]}] + (try+ + (data-info/unshare-path sharer result-folder-path sharee) + nil + (catch ce/clj-http-error? {:keys [body]} + (str "unable to unshare result folder: " (:error_code (service/parse-json body)))))) + +;; The apps client isn't used at this time, but it will be once we extend analysis sharing +;; to HPC apps. 
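+;; Each unsharing step below returns nil on success or a failure-reason string,
+;; so the or-expression stops at the first step that fails.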
+(defn- do-job-unsharing-steps + [apps-client sharer sharee job-id job] + (or (unshare-output-folder sharer sharee job) + (iplant-groups/unshare-analysis job-id sharee))) + +(defn- unshare-accessible-job + [apps-client sharer sharee job-id job] + (if-let [failure-reason (do-job-unsharing-steps apps-client sharer sharee job-id job)] + (job-unsharing-failure job-id job failure-reason) + (job-unsharing-success job-id job))) + +(defn- unshare-extant-job + [apps-client sharer sharee job-id job] + (if (has-analysis-permission (:shortUsername sharer) job-id "own") + (unshare-accessible-job apps-client sharer sharee job-id job) + (job-unsharing-failure job-id job (job-sharing-msg :not-found job-id)))) + +(defn- unshare-job + [apps-client sharer sharee job-id] + (if-let [job (jp/get-job-by-id job-id)] + (try+ + (unshare-extant-job apps-client sharer sharee job-id job) + (catch [:type ::permission-load-failure] {:keys [reason]} + (job-unsharing-failure job-id job (job-sharing-msg :load-failure job-id reason)))) + (job-unsharing-failure job-id nil (job-sharing-msg :not-found job-id)))) + +(defn- unshare-jobs-with-user + [apps-client sharer {sharee :user :keys [analyses]}] + (let [responses (mapv (partial unshare-job apps-client sharer sharee) analyses)] + (cn/send-analysis-unsharing-notifications (:shortUsername sharer) sharee responses) + {:user sharee + :analyses responses})) + +(defn unshare-jobs + [apps-client user unsharing-requests] + (mapv (partial unshare-jobs-with-user apps-client user) unsharing-requests)) From 333e6de5bb52c5901aacf933e2b819d8ce71a635 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 11 Mar 2016 18:22:00 -0700 Subject: [PATCH 144/183] CORE-7559: fixed an error message in POST /analyses/unsharing --- services/apps/src/apps/service/apps/jobs/sharing.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/apps/src/apps/service/apps/jobs/sharing.clj b/services/apps/src/apps/service/apps/jobs/sharing.clj index a5e041f7d..5b064eaf2 100644 --- a/services/apps/src/apps/service/apps/jobs/sharing.clj +++ b/services/apps/src/apps/service/apps/jobs/sharing.clj @@ -147,7 +147,7 @@ [apps-client sharer sharee job-id job] (if (has-analysis-permission (:shortUsername sharer) job-id "own") (unshare-accessible-job apps-client sharer sharee job-id job) - (job-unsharing-failure job-id job (job-sharing-msg :not-found job-id)))) + (job-unsharing-failure job-id job (job-sharing-msg :not-allowed job-id)))) (defn- unshare-job [apps-client sharer sharee job-id] From c1640ba81e9d25c2f70f0574d03da29262c49d01 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Fri, 11 Mar 2016 18:22:29 -0700 Subject: [PATCH 145/183] CORE-7559: added the POST /analyses/unsharing endpoint to terrain --- services/terrain/src/terrain/clients/apps/raw.clj | 9 +++++++++ services/terrain/src/terrain/routes/metadata.clj | 3 +++ 2 files changed, 12 insertions(+) diff --git a/services/terrain/src/terrain/clients/apps/raw.clj b/services/terrain/src/terrain/clients/apps/raw.clj index 61b3eedf1..42a77e53e 100644 --- a/services/terrain/src/terrain/clients/apps/raw.clj +++ b/services/terrain/src/terrain/clients/apps/raw.clj @@ -271,6 +271,15 @@ :as :stream :follow-redirects false})) +(defn unshare-jobs + [body] + (client/post (apps-url "analyses" "unsharing") + {:query-params (secured-params) + :content-type :json + :body body + :as :stream + :follow-redirects false})) + (defn submit-job [submission] (client/post (apps-url "analyses") diff --git a/services/terrain/src/terrain/routes/metadata.clj 
b/services/terrain/src/terrain/routes/metadata.clj index 7cc0d67cc..766708b07 100644 --- a/services/terrain/src/terrain/routes/metadata.clj +++ b/services/terrain/src/terrain/routes/metadata.clj @@ -182,6 +182,9 @@ (POST "/analyses/sharing" [:as {:keys [body]}] (service/success-response (apps/share-jobs body))) + (POST "/analyses/unsharing" [:as {:keys [body]}] + (service/success-response (apps/unshare-jobs body))) + (PATCH "/analyses/:analysis-id" [analysis-id :as {body :body}] (service/success-response (apps/update-job analysis-id body))) From 812b3489f2b4f2bb665f08b953ccc16e079fe2fa Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 11:14:38 -0700 Subject: [PATCH 146/183] CORE-7563: modified the analysis listing endpoint to indicate whether or not an analysis can be shared --- services/apps/src/apps/routes/domain/analysis/listing.clj | 5 ++++- services/apps/src/apps/service/apps/job_listings.clj | 8 +++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/services/apps/src/apps/routes/domain/analysis/listing.clj b/services/apps/src/apps/routes/domain/analysis/listing.clj index a6482e5a3..cff23cace 100644 --- a/services/apps/src/apps/routes/domain/analysis/listing.clj +++ b/services/apps/src/apps/routes/domain/analysis/listing.clj @@ -62,7 +62,10 @@ (describe UUID "The identifier of the parent analysis.") (optional-key :batch_status) - (describe BatchStatus "A summary of the status of the batch.")}) + (describe BatchStatus "A summary of the status of the batch.") + + :can_share + (describe Boolean "Indicates whether or not the analysis can be shared.")}) (defschema AnalysisList {:analyses (describe [Analysis] "The list of analyses.") diff --git a/services/apps/src/apps/service/apps/job_listings.clj b/services/apps/src/apps/service/apps/job_listings.clj index b52eb17ca..f2059997b 100644 --- a/services/apps/src/apps/service/apps/job_listings.clj +++ b/services/apps/src/apps/service/apps/job_listings.clj @@ -46,7 +46,7 @@ :total (count children))))) (defn format-job - [app-tables job] + [apps-client app-tables job] (remove-nil-vals {:app_description (:app-description job) :app_id (:app-id job) @@ -65,7 +65,9 @@ :app_disabled (app-disabled? app-tables (:app-id job)) :parent_id (:parent-id job) :batch (:is-batch job) - :batch_status (when (:is-batch job) (format-batch-status (:id job)))})) + :batch_status (when (:is-batch job) (format-batch-status (:id job))) + :can_share (and (not (:is-batch job)) + (every? #(.supportsJobSharing apps-client %) (jp/list-job-steps (:id job))))})) (defn- list-jobs* [{:keys [username]} {:keys [limit offset sort-field sort-dir filter include-hidden]} types] @@ -82,7 +84,7 @@ types (.getJobTypes apps-client) jobs (list-jobs* user search-params types) app-tables (.loadAppTables apps-client (map :app-id jobs))] - {:analyses (map (partial format-job app-tables) jobs) + {:analyses (map (partial format-job apps-client app-tables) jobs) :timestamp (str (System/currentTimeMillis)) :total (count-jobs user params types)})) From 4ac03c8e350bc3dbac48e4f6de65a9a744c2d27b Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Mon, 14 Mar 2016 11:08:20 -0700 Subject: [PATCH 147/183] CORE-7550 Rename permIdRequest.model.PermanentIdRequestPathProvider Renamed permIdRequest.views.FolderPathProvider to permIdRequest.model.PermanentIdRequestPathProvider. 
--- .../PermanentIdRequestPathProvider.java} | 9 +++++---- .../permIdRequest/views/PermanentIdRequestViewImpl.java | 5 ++--- 2 files changed, 7 insertions(+), 7 deletions(-) rename ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/{views/FolderPathProvider.java => model/PermanentIdRequestPathProvider.java} (62%) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/FolderPathProvider.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/model/PermanentIdRequestPathProvider.java similarity index 62% rename from ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/FolderPathProvider.java rename to ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/model/PermanentIdRequestPathProvider.java index 48cf31510..71c2d39dd 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/FolderPathProvider.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/model/PermanentIdRequestPathProvider.java @@ -1,5 +1,6 @@ -package org.iplantc.de.admin.desktop.client.permIdRequest.views; +package org.iplantc.de.admin.desktop.client.permIdRequest.model; +import org.iplantc.de.admin.desktop.client.permIdRequest.views.PermanentIdRequestView.PermanentIdRequestViewAppearance; import org.iplantc.de.client.models.identifiers.PermanentIdRequest; import com.sencha.gxt.core.client.ValueProvider; @@ -7,11 +8,11 @@ /** * @author psarando */ -public class FolderPathProvider implements ValueProvider { +public class PermanentIdRequestPathProvider implements ValueProvider { - private final PermanentIdRequestView.PermanentIdRequestViewAppearance appearance; + private final PermanentIdRequestViewAppearance appearance; - public FolderPathProvider(PermanentIdRequestView.PermanentIdRequestViewAppearance appearance) { + public PermanentIdRequestPathProvider(PermanentIdRequestViewAppearance appearance) { this.appearance = appearance; } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java index acabbd464..00ffea626 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/admin/desktop/client/permIdRequest/views/PermanentIdRequestViewImpl.java @@ -1,10 +1,10 @@ package org.iplantc.de.admin.desktop.client.permIdRequest.views; +import org.iplantc.de.admin.desktop.client.permIdRequest.model.PermanentIdRequestPathProvider; import org.iplantc.de.client.models.diskResources.Folder; import org.iplantc.de.client.models.identifiers.PermanentIdRequest; import org.iplantc.de.client.models.identifiers.PermanentIdRequestAutoBeanFactory; import org.iplantc.de.client.models.identifiers.PermanentIdRequestType; -import org.iplantc.de.client.models.identifiers.PermanentIdRequestUpdate; import org.iplantc.de.client.services.DiskResourceServiceFacade; import com.google.gwt.core.client.GWT; @@ -23,7 +23,6 @@ import com.sencha.gxt.widget.core.client.button.TextButton; import com.sencha.gxt.widget.core.client.event.DialogHideEvent; import com.sencha.gxt.widget.core.client.event.DialogHideEvent.DialogHideHandler; -import com.sencha.gxt.widget.core.client.event.SelectEvent.SelectHandler; import com.sencha.gxt.widget.core.client.event.SelectEvent; import 
com.sencha.gxt.widget.core.client.grid.ColumnConfig; import com.sencha.gxt.widget.core.client.grid.ColumnModel; @@ -154,7 +153,7 @@ ColumnModel createColumnModel() { ColumnConfig nameCol = new ColumnConfig<>(pr_props.requestedBy(), appearance.nameColumnWidth(), appearance.nameColumnLabel()); - ColumnConfig pathCol = new ColumnConfig<>(new FolderPathProvider(appearance), + ColumnConfig pathCol = new ColumnConfig<>(new PermanentIdRequestPathProvider(appearance), appearance.pathColumnWidth(), appearance.pathColumnLabel()); ColumnConfig dateSubCol = new ColumnConfig<>(pr_props.dateSubmitted(), From f160c2b462db2f3fb64f7a56606d4adb15a75dfd Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 13:33:25 -0700 Subject: [PATCH 148/183] CORE-7558: changed the notification type for analyses sharing notifications to "analysis" --- services/apps/src/apps/clients/notifications/job_sharing.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/apps/src/apps/clients/notifications/job_sharing.clj b/services/apps/src/apps/clients/notifications/job_sharing.clj index 169a22478..a368bb881 100644 --- a/services/apps/src/apps/clients/notifications/job_sharing.clj +++ b/services/apps/src/apps/clients/notifications/job_sharing.clj @@ -3,7 +3,7 @@ [medley.core :only [remove-vals]]) (:require [clojure.string :as string])) -(def notification-type "analyses") +(def notification-type "analysis") (def singular "analysis") (def plural "analyses") From 92c779c4a49220f2b42031870cd7265b322b55ab Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 13:35:20 -0700 Subject: [PATCH 149/183] CORE-7558: added checks to the job sharing and unsharing endpoints to ensure that they are not members of an HT batch --- .../src/apps/service/apps/jobs/sharing.clj | 58 +++++++++---------- 1 file changed, 27 insertions(+), 31 deletions(-) diff --git a/services/apps/src/apps/service/apps/jobs/sharing.clj b/services/apps/src/apps/service/apps/jobs/sharing.clj index 5b064eaf2..b3402e5ad 100644 --- a/services/apps/src/apps/service/apps/jobs/sharing.clj +++ b/services/apps/src/apps/service/apps/jobs/sharing.clj @@ -7,6 +7,7 @@ [apps.persistence.jobs :as jp] [apps.service.apps.jobs.permissions :as job-permissions] [apps.util.service :as service] + [clojure.tools.logging :as log] [clojure-commons.error-codes :as ce])) (defn- get-job-name @@ -16,7 +17,8 @@ (def job-sharing-formats {:not-found "analysis ID {{analysis-id}} does not exist" :load-failure "unable to load permissions for {{analysis-id}}: {{detail}}" - :not-allowed "insufficient privileges for analysis ID {{analysis-id}}"}) + :not-allowed "insufficient privileges for analysis ID {{analysis-id}}" + :is-subjob "analysis sharing not supported for individual jobs within an HT batch"}) (defn- job-sharing-success [job-id job level] @@ -69,6 +71,16 @@ (-> (iplant-groups/load-analysis-permissions user [job-id]) (iplant-groups/has-permission-level required-level job-id))) +(defn- verify-accessible + [sharer job-id] + (when-not (has-analysis-permission (:shortUsername sharer) job-id "own") + (job-sharing-msg :not-allowed job-id))) + +(defn- verify-not-subjob + [{:keys [id parent-id]}] + (when parent-id + (job-sharing-msg :is-subjob id))) + (defn- share-app-for-job [apps-client sharer sharee job-id {:keys [app-id]}] (when-not (.hasAppPermission apps-client sharee app-id "read") @@ -84,29 +96,21 @@ (catch ce/clj-http-error? 
{:keys [body]} (str "unable to share result folder: " (:error_code (service/parse-json body)))))) -(defn- do-job-sharing-steps +(defn- share-job* [apps-client sharer sharee job-id job level] - (or (share-app-for-job apps-client sharer sharee job-id job) + (or (verify-not-subjob job) + (verify-accessible sharer job-id) + (share-app-for-job apps-client sharer sharee job-id job) (share-output-folder sharer sharee job) (iplant-groups/share-analysis job-id sharee level))) -(defn- share-accessible-job - [apps-client sharer sharee job-id job level] - (if-let [failure-reason (do-job-sharing-steps apps-client sharer sharee job-id job level)] - (job-sharing-failure job-id job level failure-reason) - (job-sharing-success job-id job level))) - -(defn- share-extant-job - [apps-client sharer sharee job-id job level] - (if (has-analysis-permission (:shortUsername sharer) job-id "own") - (share-accessible-job apps-client sharer sharee job-id job level) - (job-sharing-failure job-id job level (job-sharing-msg :not-allowed job-id)))) - (defn- share-job [apps-client sharer sharee {job-id :analysis_id level :permission}] (if-let [job (jp/get-job-by-id job-id)] (try+ - (share-extant-job apps-client sharer sharee job-id job level) + (if-let [failure-reason (share-job* apps-client sharer sharee job-id job level)] + (job-sharing-failure job-id job level failure-reason) + (job-sharing-success job-id job level)) (catch [:type ::permission-load-failure] {:keys [reason]} (job-sharing-failure job-id job level (job-sharing-msg :load-failure job-id reason)))) (job-sharing-failure job-id nil level (job-sharing-msg :not-found job-id)))) @@ -132,28 +136,20 @@ ;; The apps client isn't used at this time, but it will be once we extend analysis sharing ;; to HPC apps. -(defn- do-job-unsharing-steps +(defn- unshare-job* [apps-client sharer sharee job-id job] - (or (unshare-output-folder sharer sharee job) + (or (verify-not-subjob job) + (verify-accessible sharer job-id) + (unshare-output-folder sharer sharee job) (iplant-groups/unshare-analysis job-id sharee))) -(defn- unshare-accessible-job - [apps-client sharer sharee job-id job] - (if-let [failure-reason (do-job-unsharing-steps apps-client sharer sharee job-id job)] - (job-unsharing-failure job-id job failure-reason) - (job-unsharing-success job-id job))) - -(defn- unshare-extant-job - [apps-client sharer sharee job-id job] - (if (has-analysis-permission (:shortUsername sharer) job-id "own") - (unshare-accessible-job apps-client sharer sharee job-id job) - (job-unsharing-failure job-id job (job-sharing-msg :not-allowed job-id)))) - (defn- unshare-job [apps-client sharer sharee job-id] (if-let [job (jp/get-job-by-id job-id)] (try+ - (unshare-extant-job apps-client sharer sharee job-id job) + (if-let [failure-reason (unshare-job* apps-client sharer sharee job-id job)] + (job-unsharing-failure job-id job failure-reason) + (job-unsharing-success job-id job)) (catch [:type ::permission-load-failure] {:keys [reason]} (job-unsharing-failure job-id job (job-sharing-msg :load-failure job-id reason)))) (job-unsharing-failure job-id nil (job-sharing-msg :not-found job-id)))) From 9b9924c3eacbfe0153f11973c4234131e1dc310d Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 13:36:06 -0700 Subject: [PATCH 150/183] CORE-7563: added a Boolean flag indicating whether or not an analysis can be shared to the analysis listing service --- services/apps/src/apps/persistence/jobs.clj | 16 ++++++++++++++++ .../apps/src/apps/service/apps/job_listings.clj | 9 ++++++--- 2 files 
changed, 22 insertions(+), 3 deletions(-) diff --git a/services/apps/src/apps/persistence/jobs.clj b/services/apps/src/apps/persistence/jobs.clj index d436c3660..36b21dc23 100644 --- a/services/apps/src/apps/persistence/jobs.clj +++ b/services/apps/src/apps/persistence/jobs.clj @@ -493,6 +493,22 @@ (where {:job_id job-id}) (order :step_number))) +(defn- child-job-subselect + [job-id] + (subselect :jobs + (fields :id) + (where {:parent_id job-id}) + (limit 1))) + +(defn list-representative-job-steps + "Lists all of the job steps in a standalone job or all of the steps of one of the jobs in an HT batch. The purpose + of this function is to ensure that steps of every job type that are used in a job are listed. The analysis listing + code uses this function to determine whether or not a job can be shared." + [job-id] + (select (job-step-base-query) + (where (or {:job_id job-id} + {:job_id [in (child-job-subselect job-id)]})))) + (defn list-jobs-to-delete [ids] (select [:jobs :j] diff --git a/services/apps/src/apps/service/apps/job_listings.clj b/services/apps/src/apps/service/apps/job_listings.clj index f2059997b..4e7c8c54b 100644 --- a/services/apps/src/apps/service/apps/job_listings.clj +++ b/services/apps/src/apps/service/apps/job_listings.clj @@ -43,7 +43,11 @@ (assoc (->> (group-by batch-child-status children) (map (fn [[k v]] [k (count v)])) (into {})) - :total (count children))))) + :total (count children))))) + +(defn- job-supports-sharing? + [apps-client {parent-id :parent_id :keys [id]}] + (and (nil? parent-id) (every? #(.supportsJobSharing apps-client %) (jp/list-representative-job-steps id)))) (defn format-job [apps-client app-tables job] @@ -66,8 +70,7 @@ :parent_id (:parent-id job) :batch (:is-batch job) :batch_status (when (:is-batch job) (format-batch-status (:id job))) - :can_share (and (not (:is-batch job)) - (every? #(.supportsJobSharing apps-client %) (jp/list-job-steps (:id job))))})) + :can_share (job-supports-sharing? apps-client job)})) (defn- list-jobs* [{:keys [username]} {:keys [limit offset sort-field sort-dir filter include-hidden]} types] From 089ec4af131768232e5adeef0965068a2fb26ad3 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 14 Mar 2016 13:38:41 -0700 Subject: [PATCH 151/183] Move mode implementation into dedicated functions. 
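The shape of the change, reduced to a standalone sketch (the indexer struct and the flag handling below are stand-ins for templeton's Elasticer, Databaser, and messaging plumbing, not code from the tree): main() stays a thin dispatcher on the -mode flag, and each mode owns its own control flow. One Go subtlety rides along with the moved spinner loop: nothing ever sends on the spinner channel, and even if something did, a break inside select exits only the select, never the enclosing for, so the "Exiting" case could not terminate the loop anyway. A receive from a channel that is never written to keeps the process alive with the same effect and without the misleading break.

    package main

    import (
        "flag"
        "fmt"
        "os"
        "time"
    )

    // indexer stands in for the Elasticer/Databaser pair that the real
    // doFullMode and doPeriodicMode receive.
    type indexer struct{}

    func doFullMode(ix *indexer) {
        fmt.Println("Full indexing mode selected.")
        // reindex once, then return and let the process exit
    }

    func doPeriodicMode(ix *indexer) {
        fmt.Println("Periodic indexing mode selected.")
        // Stands in for client.Listen() plus the AddConsumer registrations.
        go func() {
            for {
                time.Sleep(time.Second) // pretend to wait on AMQP deliveries
            }
        }()
        // Keep the process alive: a receive on a channel with no sender
        // blocks forever, with no for/select spinner to misread.
        block := make(chan struct{})
        <-block
    }

    func main() {
        mode := flag.String("mode", "full", "indexing mode: full or periodic")
        flag.Parse()

        ix := &indexer{}
        switch *mode {
        case "full":
            doFullMode(ix)
        case "periodic":
            doPeriodicMode(ix)
        default:
            fmt.Fprintf(os.Stderr, "unknown mode: %s\n", *mode)
            os.Exit(1)
        }
    }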
--- services/templeton/src/templeton/main.go | 61 +++++++++++++----------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/services/templeton/src/templeton/main.go b/services/templeton/src/templeton/main.go index 93fc31afe..eb7d6427f 100644 --- a/services/templeton/src/templeton/main.go +++ b/services/templeton/src/templeton/main.go @@ -99,6 +99,38 @@ func loadDBConfig() { } } +func doFullMode(es *elasticsearch.Elasticer, d *database.Databaser) { + logger.Println("Full indexing mode selected.") + + es.Reindex(d) +} + +func doPeriodicMode(es *elasticsearch.Elasticer, d *database.Databaser, client *messaging.Client) { + logger.Println("Periodic indexing mode selected.") + + go client.Listen() + + // Accept and handle messages sent out with the index.all and index.templates routing keys + client.AddConsumer(messaging.ReindexExchange, "direct", "templeton.reindexAll", messaging.ReindexAllKey, func(del amqp.Delivery) { + es.Reindex(d) + del.Ack(false) + }) + client.AddConsumer(messaging.ReindexExchange, "direct", "templeton.reindexTemplates", messaging.ReindexTemplatesKey, func(del amqp.Delivery) { + es.Reindex(d) + del.Ack(false) + }) + + // spinner in order to keep the program running since client.Listen() is in a goroutine. + spinner := make(chan int) + for { + select { + case <-spinner: + fmt.Println("Exiting") + break + } + } +} + func main() { if *version { AppVersion() @@ -128,9 +160,7 @@ func main() { } if *mode == "full" { - logger.Println("Full indexing mode selected.") - - es.Reindex(d) + doFullMode(es, d) return } @@ -143,35 +173,12 @@ func main() { defer client.Close() if *mode == "periodic" { - logger.Println("Periodic indexing mode selected.") - - go client.Listen() - - // Accept and handle messages sent out with the index.all and index.templates routing keys - client.AddConsumer(messaging.ReindexExchange, "direct", "templeton.reindexAll", messaging.ReindexAllKey, func(del amqp.Delivery) { - es.Reindex(d) - del.Ack(false) - }) - client.AddConsumer(messaging.ReindexExchange, "direct", "templeton.reindexTemplates", messaging.ReindexTemplatesKey, func(del amqp.Delivery) { - es.Reindex(d) - del.Ack(false) - }) - - // spinner in order to keep the program running since client.Listen() is in a goroutine. 
- spinner := make(chan int) - for { - select { - case <-spinner: - fmt.Println("Exiting") - break - } - } + doPeriodicMode(es, d, client) } if *mode == "incremental" { logger.Println("Incremental indexing mode selected.") // TODO: AMQP listener triggering incremental updates - return } } From d19b570c0622de591d55d063cb667546a9aa3e3a Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 14:39:28 -0700 Subject: [PATCH 152/183] CORE-7563: added an extra validation step to the analysis permission listing, sharing and unsharing endpoints in apps --- .../src/apps/service/apps/job_listings.clj | 15 +++++------ .../apps/service/apps/jobs/permissions.clj | 25 +++++++++++++------ .../src/apps/service/apps/jobs/sharing.clj | 16 +++++++++--- 3 files changed, 36 insertions(+), 20 deletions(-) diff --git a/services/apps/src/apps/service/apps/job_listings.clj b/services/apps/src/apps/service/apps/job_listings.clj index 4e7c8c54b..87c47629b 100644 --- a/services/apps/src/apps/service/apps/job_listings.clj +++ b/services/apps/src/apps/service/apps/job_listings.clj @@ -3,6 +3,7 @@ [apps.util.conversions :only [remove-nil-vals]]) (:require [kameleon.db :as db] [apps.persistence.jobs :as jp] + [apps.service.apps.jobs.permissions :as job-permissions] [apps.service.util :as util])) (defn is-completed? @@ -45,19 +46,15 @@ (into {})) :total (count children))))) -(defn- job-supports-sharing? - [apps-client {parent-id :parent_id :keys [id]}] - (and (nil? parent-id) (every? #(.supportsJobSharing apps-client %) (jp/list-representative-job-steps id)))) - (defn format-job - [apps-client app-tables job] + [apps-client app-tables {:keys [parent-id id] :as job}] (remove-nil-vals {:app_description (:app-description job) :app_id (:app-id job) :app_name (:app-name job) :description (:description job) :enddate (job-timestamp (:end-date job)) - :id (:id job) + :id id :name (:job-name job) :resultfolderid (:result-folder-path job) :startdate (job-timestamp (:start-date job)) @@ -67,10 +64,10 @@ :notify (:notify job false) :wiki_url (:app-wiki-url job) :app_disabled (app-disabled? app-tables (:app-id job)) - :parent_id (:parent-id job) + :parent_id parent-id :batch (:is-batch job) - :batch_status (when (:is-batch job) (format-batch-status (:id job))) - :can_share (job-supports-sharing? apps-client job)})) + :batch_status (when (:is-batch job) (format-batch-status id)) + :can_share (and (nil? parent-id) (job-permissions/supports-job-sharing? apps-client id))})) (defn- list-jobs* [{:keys [username]} {:keys [limit offset sort-field sort-dir filter include-hidden]} types] diff --git a/services/apps/src/apps/service/apps/jobs/permissions.clj b/services/apps/src/apps/service/apps/jobs/permissions.clj index 14268e70e..215d54745 100644 --- a/services/apps/src/apps/service/apps/jobs/permissions.clj +++ b/services/apps/src/apps/service/apps/jobs/permissions.clj @@ -5,6 +5,10 @@ [apps.service.apps.jobs.util :as ju] [clojure-commons.exception-util :as cxu])) +(defn supports-job-sharing? + [apps-client job-id] + (every? #(.supportsJobSharing apps-client %) (jp/list-representative-job-steps job-id))) + (defn- validate-job-permission-level [short-username perms required-level job-ids] (doseq [job-id job-ids] @@ -14,11 +18,15 @@ (defn- validate-job-sharing-support [apps-client job-ids] - (doseq [job-id job-ids - job-step (jp/list-job-steps job-id)] - (when-not (.supportsJobSharing apps-client job-step) + (doseq [job-id job-ids] + (when-not (supports-job-sharing?
apps-client job-id) (cxu/bad-request (str "analysis sharing not supported for " job-id))))) +(defn- verify-not-subjobs + [jobs] + (when-let [subjob-ids (seq (map :id (filter :parent-id jobs)))] + (cxu/bad-request (str "analysis sharing not supported for members of a batch job") :jobs subjob-ids))) + (defn- validate-jobs-for-permissions [apps-client {short-username :shortUsername} perms required-level job-ids] (ju/validate-job-existence job-ids) @@ -39,7 +47,10 @@ (defn list-job-permissions [apps-client {:keys [username] :as user} job-ids] - (let [perms (iplant-groups/list-analysis-permissions job-ids)] - (transaction - (validate-jobs-for-permissions apps-client user perms "read" job-ids) - (format-job-permission-listing user perms (jp/list-jobs-by-id job-ids))))) + (ju/validate-job-existence job-ids) + (transaction + (let [jobs (jp/list-jobs-by-id job-ids)] + (verify-not-subjobs jobs) + (let [perms (iplant-groups/list-analysis-permissions job-ids)] + (validate-jobs-for-permissions apps-client user perms "read" job-ids) + (format-job-permission-listing user perms (jp/list-jobs-by-id job-ids)))))) diff --git a/services/apps/src/apps/service/apps/jobs/sharing.clj b/services/apps/src/apps/service/apps/jobs/sharing.clj index b3402e5ad..4ae5d6477 100644 --- a/services/apps/src/apps/service/apps/jobs/sharing.clj +++ b/services/apps/src/apps/service/apps/jobs/sharing.clj @@ -15,10 +15,11 @@ (or job-name (str "analysis ID " job-id))) (def job-sharing-formats - {:not-found "analysis ID {{analysis-id}} does not exist" - :load-failure "unable to load permissions for {{analysis-id}}: {{detail}}" - :not-allowed "insufficient privileges for analysis ID {{analysis-id}}" - :is-subjob "analysis sharing not supported for individual jobs within an HT batch"}) + {:not-found "analysis ID {{analysis-id}} does not exist" + :load-failure "unable to load permissions for {{analysis-id}}: {{detail}}" + :not-allowed "insufficient privileges for analysis ID {{analysis-id}}" + :is-subjob "analysis sharing not supported for individual jobs within an HT batch" + :not-supported "analysis sharing is not supported for jobs of this type"}) (defn- job-sharing-success [job-id job level] @@ -81,6 +82,11 @@ (when parent-id (job-sharing-msg :is-subjob id))) +(defn- verify-support + [apps-client job-id] + (when-not (job-permissions/supports-job-sharing? 
apps-client job-id) + (job-sharing-msg :not-supported job-id))) + (defn- share-app-for-job [apps-client sharer sharee job-id {:keys [app-id]}] (when-not (.hasAppPermission apps-client sharee app-id "read") @@ -100,6 +106,7 @@ [apps-client sharer sharee job-id job level] (or (verify-not-subjob job) (verify-accessible sharer job-id) + (verify-support apps-client job-id) (share-app-for-job apps-client sharer sharee job-id job) (share-output-folder sharer sharee job) (iplant-groups/share-analysis job-id sharee level))) @@ -140,6 +147,7 @@ [apps-client sharer sharee job-id job] (or (verify-not-subjob job) (verify-accessible sharer job-id) + (verify-support apps-client job-id) (unshare-output-folder sharer sharee job) (iplant-groups/unshare-analysis job-id sharee))) From 1ed4c8504c5e298bfa0edc9a8360325e41638388 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 14 Mar 2016 14:58:57 -0700 Subject: [PATCH 153/183] Remove redundant t.Fail()s after t.Error() --- services/templeton/src/configurate/configurate_test.go | 9 --------- services/templeton/src/logcabin/logcabin_test.go | 4 ---- 2 files changed, 13 deletions(-) diff --git a/services/templeton/src/configurate/configurate_test.go b/services/templeton/src/configurate/configurate_test.go index d6bf1e203..65d65948c 100644 --- a/services/templeton/src/configurate/configurate_test.go +++ b/services/templeton/src/configurate/configurate_test.go @@ -11,7 +11,6 @@ func TestNew(t *testing.T) { err := configurator() if err != nil { t.Error(err) - t.Fail() } if C == nil { t.Errorf("configurate.New() returned nil") @@ -22,12 +21,10 @@ func TestAMQPConfig(t *testing.T) { err := configurator() if err != nil { t.Error(err) - t.Fail() } actual, err := C.String("amqp.uri") if err != nil { t.Error(err) - t.Fail() } expected := "amqp://guest:guest@192.168.99.100:5672/" if actual != expected { @@ -39,12 +36,10 @@ func TestDBConfig(t *testing.T) { err := configurator() if err != nil { t.Error(err) - t.Fail() } actual, err := C.String("db.uri") if err != nil { t.Error(err) - t.Fail() } expected := "postgres://de:notprod@192.168.99.100:5432/metadata?sslmode=disable" if actual != expected { @@ -56,12 +51,10 @@ func TestESBase(t *testing.T) { err := configurator() if err != nil { t.Error(err) - t.Fail() } actual, err := C.String("elasticsearch.base") if err != nil { t.Error(err) - t.Fail() } expected := "http://localhost:9200" if actual != expected { @@ -73,12 +66,10 @@ func TestESIndex(t *testing.T) { err := configurator() if err != nil { t.Error(err) - t.Fail() } actual, err := C.String("elasticsearch.index") if err != nil { t.Error(err) - t.Fail() } expected := "data" if actual != expected { diff --git a/services/templeton/src/logcabin/logcabin_test.go b/services/templeton/src/logcabin/logcabin_test.go index 8c95c908c..53ee42803 100644 --- a/services/templeton/src/logcabin/logcabin_test.go +++ b/services/templeton/src/logcabin/logcabin_test.go @@ -35,7 +35,6 @@ func TestLogWriter(t *testing.T) { r, w, err := os.Pipe() if err != nil { t.Error(err) - t.Fail() } os.Stdout = w restore := func() { @@ -47,19 +46,16 @@ func TestLogWriter(t *testing.T) { if err != nil { t.Error(err) os.Stdout = original - t.Fail() } w.Close() var msg LogMessage actualBytes, err := ioutil.ReadAll(r) if err != nil { t.Error(err) - t.Fail() } err = json.Unmarshal(actualBytes, &msg) if err != nil { t.Error(err) - t.Fail() } actual := msg.Message if actual != expected { From e8c435b12d6d625a91382f1d962eaeff5ff10bf6 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 14 Mar 2016 15:01:54 
-0700 Subject: [PATCH 154/183] Make newObjectCursor easier to read. --- services/templeton/src/templeton/database/database.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/services/templeton/src/templeton/database/database.go b/services/templeton/src/templeton/database/database.go index e17792b02..408cd4dd9 100644 --- a/services/templeton/src/templeton/database/database.go +++ b/services/templeton/src/templeton/database/database.go @@ -140,7 +140,11 @@ type objectCursor struct { } func newObjectCursor(rows *sql.Rows) *objectCursor { - return &objectCursor{rows: rows, lastRow: &model.AVURecord{TargetId: ""}, moreRows: true, anyRows: false} + return &objectCursor{ + rows: rows, + lastRow: &model.AVURecord{TargetId: ""}, + moreRows: true, + anyRows: false} } func (o *objectCursor) Next() ([]model.AVURecord, error) { From 334087853565d4818def343bb0407e84230b20d0 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 14 Mar 2016 15:58:31 -0700 Subject: [PATCH 155/183] Add templeton to assorted ansible pieces. --- ansible/DockerfileConfigs | 2 +- ansible/inventories/example.cfg | 4 +++ ansible/inventories/group_vars/all | 11 ++++++++ ansible/playbooks/de-pull-images.yaml | 15 +++++++++++ ansible/playbooks/de-rm-containers.yml | 26 +++++++++++++++++++ ansible/playbooks/de-start-containers.yml | 12 +++++++++ ansible/playbooks/de-stop-containers.yml | 13 ++++++++++ ansible/playbooks/local-services-cfg.yml | 5 ++++ ansible/roles/de-services-cfg/meta/main.yaml | 7 +++++ .../templates/templeton.yaml.j2 | 9 +++++++ 10 files changed, 103 insertions(+), 1 deletion(-) create mode 100644 ansible/roles/util-cfg-service/templates/templeton.yaml.j2 diff --git a/ansible/DockerfileConfigs b/ansible/DockerfileConfigs index b4ca8de5f..57e496af5 100644 --- a/ansible/DockerfileConfigs +++ b/ansible/DockerfileConfigs @@ -3,7 +3,7 @@ FROM tianon/true COPY config_files/logging/ /etc/iplant/de/logging/ COPY config_files/nginx/ /etc/nginx/ COPY config_files/*.properties /etc/iplant/de/ -COPY config_files/de-application.yaml /etc/iplant/de/ +COPY config_files/*.yaml /etc/iplant/de/ COPY config_files/logstash-forwarder/ /etc/logstash-forwarder/ COPY config_files/docker-gc-* /etc/docker-gc/ diff --git a/ansible/inventories/example.cfg b/ansible/inventories/example.cfg index 9b823082d..bad9d7179 100644 --- a/ansible/inventories/example.cfg +++ b/ansible/inventories/example.cfg @@ -48,6 +48,7 @@ metadata monkey notificationagent saved-searches +templeton terrain tree-urls user-preferences @@ -110,6 +111,9 @@ services.example.com [saved-searches] services.example.com +[templeton] +services.example.com + [terrain] services.example.com diff --git a/ansible/inventories/group_vars/all b/ansible/inventories/group_vars/all index b215fcea2..1dc9b9b9a 100644 --- a/ansible/inventories/group_vars/all +++ b/ansible/inventories/group_vars/all @@ -592,6 +592,17 @@ saved_searches: search_default_limit: 200 +templeton: + host: "{{ groups['templeton'][0] }}" + port: + service_name: jexevents.service + service_name_short: templeton + compose_service: templeton + service_description: templeton service + image_name: templeton + container_name: templeton + log_file: templeton-docker.log + tree_parser_base: http://portnoy.iplantcollaborative.org/parseTree tree_urls_log_file: /logs/tree-urls.log diff --git a/ansible/playbooks/de-pull-images.yaml b/ansible/playbooks/de-pull-images.yaml index a7d932ee2..46d57faf0 100644 --- a/ansible/playbooks/de-pull-images.yaml +++ b/ansible/playbooks/de-pull-images.yaml @@ -337,6 +337,21 @@ 
service_name: "{{saved_searches.compose_service}}" service_name_short: "{{saved_searches.service_name_short}}" +- name: Update templeton + hosts: templeton:&systems + become: true + gather_facts: false + tags: + - services + - templeton + roles: + - role: util-cfg-docker-pull-configs + service_name: "{{templeton.compose_service}}" + service_name_short: "{{templeton.service_name_short}}" + - role: util-cfg-docker-pull + service_name: "{{templeton.compose_service}}" + service_name_short: "{{templeton.service_name_short}}" + - name: Update terrain hosts: terrain:&systems become: true diff --git a/ansible/playbooks/de-rm-containers.yml b/ansible/playbooks/de-rm-containers.yml index 3b3023038..6c3e99dd2 100644 --- a/ansible/playbooks/de-rm-containers.yml +++ b/ansible/playbooks/de-rm-containers.yml @@ -449,6 +449,32 @@ become: true shell: docker rm -v {{saved_searches.service_name_short}} +- name: Remove templeton + hosts: templeton:&systems + become: true + gather_facts: false + tags: + - services + - templeton + roles: + - role: util-cfg-docker-rm-configs + ignore_errors: yes + service_name: "{{templeton.compose_service}}" + service_name_short: "{{templeton.service_name_short}}" + - role: util-cfg-docker-rm + ignore_errors: yes + service_name: "{{templeton.compose_service}}" + service_name_short: "{{templeton.service_name_short}}" + - role: util-cfg-docker-rm + ignore_errors: yes + service_name: "iplant_data_{{templeton.compose_service}}" + service_name_short: "iplant_data_{{templeton.service_name_short}}" + post_tasks: + - name: annihilate templeton + ignore_errors: yes + become: true + shell: docker rm -v {{templeton.service_name_short}} + - name: Remove terrain hosts: terrain:&systems become: true diff --git a/ansible/playbooks/de-start-containers.yml b/ansible/playbooks/de-start-containers.yml index f97a1e856..d97663e2b 100644 --- a/ansible/playbooks/de-start-containers.yml +++ b/ansible/playbooks/de-start-containers.yml @@ -230,6 +230,18 @@ service_name: "{{saved_searches.compose_service}}" service_name_short: "{{saved_searches.service_name_short}}" +- name: Start templeton + hosts: templeton:&systems + become: true + gather_facts: false + tags: + - services + - templeton + roles: + - role: util-cfg-docker-up + service_name: "{{templeton.compose_service}}" + service_name_short: "{{templeton.service_name_short}}" + - name: Start terrain hosts: terrain:&systems become: true diff --git a/ansible/playbooks/de-stop-containers.yml b/ansible/playbooks/de-stop-containers.yml index 799680adf..803862723 100644 --- a/ansible/playbooks/de-stop-containers.yml +++ b/ansible/playbooks/de-stop-containers.yml @@ -245,6 +245,19 @@ service_name: "{{saved_searches.compose_service}}" service_name_short: "{{saved_searches.service_name_short}}" +- name: Stop templeton + hosts: templeton:&systems + become: true + gather_facts: false + tags: + - services + - templeton + roles: + - role: util-cfg-docker-stop + ignore_errors: yes + service_name: "{{templeton.compose_service}}" + service_name_short: "{{templeton.service_name_short}}" + - name: Stop terrain hosts: terrain:&systems become: true diff --git a/ansible/playbooks/local-services-cfg.yml b/ansible/playbooks/local-services-cfg.yml index d08f7571a..101b7915a 100644 --- a/ansible/playbooks/local-services-cfg.yml +++ b/ansible/playbooks/local-services-cfg.yml @@ -92,6 +92,11 @@ - role: util-cfg-service service_name_short: "{{user_sessions.service_name_short}}" + - role: util-cfg-service + service_name_short: "{{templeton.service_name_short}}" + src: 
"templeton.yaml.j2" + dest: "{{service_conf_dir}}/templeton.yaml" + - role: util-cfg-service src: "ui/de-application.yaml.j2" dest: "{{service_conf_dir}}/de-application.yaml" diff --git a/ansible/roles/de-services-cfg/meta/main.yaml b/ansible/roles/de-services-cfg/meta/main.yaml index d210df034..9d3d485f6 100644 --- a/ansible/roles/de-services-cfg/meta/main.yaml +++ b/ansible/roles/de-services-cfg/meta/main.yaml @@ -119,6 +119,13 @@ dependencies: service_name: "{{saved_searches.compose_service}}" service_name_short: "{{saved_searches.service_name_short}}" + - role: util-cfg-docker-pull-configs + service_name: "{{templeton.compose_service}}" + service_name_short: "{{templeton.service_name_short}}" + - role: util-cfg-docker-pull + service_name: "{{templeton.compose_service}}" + service_name_short: "{{templeton.service_name_short}}" + - role: util-cfg-docker-pull-configs service_name: "{{terrain.compose_service}}" service_name_short: "{{terrain.service_name_short}}" diff --git a/ansible/roles/util-cfg-service/templates/templeton.yaml.j2 b/ansible/roles/util-cfg-service/templates/templeton.yaml.j2 new file mode 100644 index 000000000..652c4ed5f --- /dev/null +++ b/ansible/roles/util-cfg-service/templates/templeton.yaml.j2 @@ -0,0 +1,9 @@ +amqp: + uri: amqp://{{ amqp_user }}:{{ amqp_password }}@{{ amqp_broker.host }}:{{ amqp_broker.port }}/ + +elasticsearch: + base: http://{{ elasticsearch.host }}:{{ elasticsearch.port }} + index: data + +db: + uri: postgres://{{ metadata_db_user }}:{{ metadata_db_password }}@{{ metadata_db_host }}:{{ metadata_db_port }}/{{ metadata_db_name }}?sslmode=disable From 48e733d8527965996b5394676ed8ecdc3806bc64 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 16:12:41 -0700 Subject: [PATCH 156/183] CORE-7563: fixed a function call with incorrect arity --- services/apps/src/apps/service/apps/job_listings.clj | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/services/apps/src/apps/service/apps/job_listings.clj b/services/apps/src/apps/service/apps/job_listings.clj index 87c47629b..c6f2114c0 100644 --- a/services/apps/src/apps/service/apps/job_listings.clj +++ b/services/apps/src/apps/service/apps/job_listings.clj @@ -90,8 +90,9 @@ (defn list-job [apps-client job-id] - (let [job-info (jp/get-job-by-id job-id)] - (format-job (.loadAppTables apps-client [(:app-id job-info)]) job-info))) + (let [job-info (jp/get-job-by-id job-id) + app-tables (.loadAppTables apps-client [(:app-id job-info)])] + (format-job apps-client app-tables job-info))) (defn- format-job-step [step] From 718efba08405750fa40c63f6c720c0726020944a Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 14 Mar 2016 15:58:31 -0700 Subject: [PATCH 157/183] Fix to templeton-periodic --- ansible/inventories/example.cfg | 4 ++-- ansible/inventories/group_vars/all | 13 ++++++------ ansible/playbooks/de-pull-images.yaml | 13 ++++++------ ansible/playbooks/de-rm-containers.yml | 21 ++++++++----------- ansible/playbooks/de-start-containers.yml | 9 ++++---- ansible/playbooks/de-stop-containers.yml | 9 ++++---- ansible/playbooks/local-services-cfg.yml | 6 +++--- ansible/roles/de-services-cfg/meta/main.yaml | 8 +++---- ...ton.yaml.j2 => templeton-periodic.yaml.j2} | 0 9 files changed, 41 insertions(+), 42 deletions(-) rename ansible/roles/util-cfg-service/templates/{templeton.yaml.j2 => templeton-periodic.yaml.j2} (100%) diff --git a/ansible/inventories/example.cfg b/ansible/inventories/example.cfg index bad9d7179..5d48915bc 100644 --- a/ansible/inventories/example.cfg +++ 
b/ansible/inventories/example.cfg @@ -48,7 +48,7 @@ metadata monkey notificationagent saved-searches -templeton +templeton-periodic terrain tree-urls user-preferences @@ -111,7 +111,7 @@ services.example.com [saved-searches] services.example.com -[templeton] +[templeton-periodic] services.example.com [terrain] diff --git a/ansible/inventories/group_vars/all b/ansible/inventories/group_vars/all index 490bbd335..799458a68 100644 --- a/ansible/inventories/group_vars/all +++ b/ansible/inventories/group_vars/all @@ -593,15 +593,14 @@ saved_searches: search_default_limit: 200 -templeton: - host: "{{ groups['templeton'][0] }}" +templeton_periodic: + host: "{{ groups['templeton-periodic'][0] }}" port: - service_name: jexevents.service - service_name_short: templeton - compose_service: templeton - service_description: templeton service + service_name_short: templeton-periodic + compose_service: templeton_periodic + service_description: templeton periodic service image_name: templeton - container_name: templeton + container_name: templeton-periodic log_file: templeton-docker.log tree_parser_base: http://portnoy.iplantcollaborative.org/parseTree diff --git a/ansible/playbooks/de-pull-images.yaml b/ansible/playbooks/de-pull-images.yaml index 46d57faf0..493828179 100644 --- a/ansible/playbooks/de-pull-images.yaml +++ b/ansible/playbooks/de-pull-images.yaml @@ -337,20 +337,21 @@ service_name: "{{saved_searches.compose_service}}" service_name_short: "{{saved_searches.service_name_short}}" -- name: Update templeton - hosts: templeton:&systems +- name: Update templeton-periodic + hosts: templeton-periodic:&systems become: true gather_facts: false tags: - services - templeton + - templeton-periodic roles: - role: util-cfg-docker-pull-configs - service_name: "{{templeton.compose_service}}" - service_name_short: "{{templeton.service_name_short}}" + service_name: "{{templeton_periodic.compose_service}}" + service_name_short: "{{templeton_periodic.service_name_short}}" - role: util-cfg-docker-pull - service_name: "{{templeton.compose_service}}" - service_name_short: "{{templeton.service_name_short}}" + service_name: "{{templeton_periodic.compose_service}}" + service_name_short: "{{templeton_periodic.service_name_short}}" - name: Update terrain hosts: terrain:&systems diff --git a/ansible/playbooks/de-rm-containers.yml b/ansible/playbooks/de-rm-containers.yml index 6c3e99dd2..ab24aeacf 100644 --- a/ansible/playbooks/de-rm-containers.yml +++ b/ansible/playbooks/de-rm-containers.yml @@ -449,31 +449,28 @@ become: true shell: docker rm -v {{saved_searches.service_name_short}} -- name: Remove templeton - hosts: templeton:&systems +- name: Remove templeton_periodic + hosts: templeton-periodic:&systems become: true gather_facts: false tags: - services - templeton + - templeton-periodic roles: - role: util-cfg-docker-rm-configs ignore_errors: yes - service_name: "{{templeton.compose_service}}" - service_name_short: "{{templeton.service_name_short}}" + service_name: "{{templeton_periodic.compose_service}}" + service_name_short: "{{templeton_periodic.service_name_short}}" - role: util-cfg-docker-rm ignore_errors: yes - service_name: "{{templeton.compose_service}}" - service_name_short: "{{templeton.service_name_short}}" - - role: util-cfg-docker-rm - ignore_errors: yes - service_name: "iplant_data_{{templeton.compose_service}}" - service_name_short: "iplant_data_{{templeton.service_name_short}}" + service_name: "{{templeton_periodic.compose_service}}" + service_name_short: "{{templeton_periodic.service_name_short}}" 
post_tasks: - - name: annihilate templeton + - name: annihilate templeton_periodic ignore_errors: yes become: true - shell: docker rm -v {{templeton.service_name_short}} + shell: docker rm -v {{templeton_periodic.service_name_short}} - name: Remove terrain hosts: terrain:&systems diff --git a/ansible/playbooks/de-start-containers.yml b/ansible/playbooks/de-start-containers.yml index d97663e2b..0096ab9b8 100644 --- a/ansible/playbooks/de-start-containers.yml +++ b/ansible/playbooks/de-start-containers.yml @@ -230,17 +230,18 @@ service_name: "{{saved_searches.compose_service}}" service_name_short: "{{saved_searches.service_name_short}}" -- name: Start templeton - hosts: templeton:&systems +- name: Start templeton-periodic + hosts: templeton-periodic:&systems become: true gather_facts: false tags: - services - templeton + - templeton-periodic roles: - role: util-cfg-docker-up - service_name: "{{templeton.compose_service}}" - service_name_short: "{{templeton.service_name_short}}" + service_name: "{{templeton_periodic.compose_service}}" + service_name_short: "{{templeton_periodic.service_name_short}}" - name: Start terrain hosts: terrain:&systems diff --git a/ansible/playbooks/de-stop-containers.yml b/ansible/playbooks/de-stop-containers.yml index 803862723..73a5c154b 100644 --- a/ansible/playbooks/de-stop-containers.yml +++ b/ansible/playbooks/de-stop-containers.yml @@ -245,18 +245,19 @@ service_name: "{{saved_searches.compose_service}}" service_name_short: "{{saved_searches.service_name_short}}" -- name: Stop templeton - hosts: templeton:&systems +- name: Stop templeton-periodic + hosts: templeton-periodic:&systems become: true gather_facts: false tags: - services - templeton + - templeton-periodic roles: - role: util-cfg-docker-stop ignore_errors: yes - service_name: "{{templeton.compose_service}}" - service_name_short: "{{templeton.service_name_short}}" + service_name: "{{templeton_periodic.compose_service}}" + service_name_short: "{{templeton_periodic.service_name_short}}" - name: Stop terrain hosts: terrain:&systems diff --git a/ansible/playbooks/local-services-cfg.yml b/ansible/playbooks/local-services-cfg.yml index 101b7915a..865dc0baa 100644 --- a/ansible/playbooks/local-services-cfg.yml +++ b/ansible/playbooks/local-services-cfg.yml @@ -93,9 +93,9 @@ service_name_short: "{{user_sessions.service_name_short}}" - role: util-cfg-service - service_name_short: "{{templeton.service_name_short}}" - src: "templeton.yaml.j2" - dest: "{{service_conf_dir}}/templeton.yaml" + service_name_short: "{{templeton_periodic.service_name_short}}" + src: "templeton-periodic.yaml.j2" + dest: "{{service_conf_dir}}/templeton-periodic.yaml" - role: util-cfg-service src: "ui/de-application.yaml.j2" diff --git a/ansible/roles/de-services-cfg/meta/main.yaml b/ansible/roles/de-services-cfg/meta/main.yaml index 9d3d485f6..3836a0e6b 100644 --- a/ansible/roles/de-services-cfg/meta/main.yaml +++ b/ansible/roles/de-services-cfg/meta/main.yaml @@ -120,11 +120,11 @@ dependencies: service_name_short: "{{saved_searches.service_name_short}}" - role: util-cfg-docker-pull-configs - service_name: "{{templeton.compose_service}}" - service_name_short: "{{templeton.service_name_short}}" + service_name: "{{templeton_periodic.compose_service}}" + service_name_short: "{{templeton_periodic.service_name_short}}" - role: util-cfg-docker-pull - service_name: "{{templeton.compose_service}}" - service_name_short: "{{templeton.service_name_short}}" + service_name: "{{templeton_periodic.compose_service}}" + service_name_short: 
"{{templeton_periodic.service_name_short}}" - role: util-cfg-docker-pull-configs service_name: "{{terrain.compose_service}}" diff --git a/ansible/roles/util-cfg-service/templates/templeton.yaml.j2 b/ansible/roles/util-cfg-service/templates/templeton-periodic.yaml.j2 similarity index 100% rename from ansible/roles/util-cfg-service/templates/templeton.yaml.j2 rename to ansible/roles/util-cfg-service/templates/templeton-periodic.yaml.j2 From 503b9477415d04a1a59223fd136ab2218534f4d1 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 17:17:48 -0700 Subject: [PATCH 158/183] CORE-7563: made it easier to visually determine when a test failed or succeeded --- services/apps/test.sh | 42 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/services/apps/test.sh b/services/apps/test.sh index 4894dd288..e30ec7b60 100755 --- a/services/apps/test.sh +++ b/services/apps/test.sh @@ -1,7 +1,12 @@ #!/bin/sh set -e -set -x + +function error_exit() { + echo + echo "TEST FAILED: $@" 1>&2 + exit 1 +} CMD=$1 @@ -20,12 +25,33 @@ if [ $(docker ps -aqf "name=$DBCONTAINER" | wc -l) -gt 0 ]; then docker rm $DBCONTAINER fi -docker pull discoenv/buildenv -docker run --rm -v $(pwd):/build -w /build discoenv/buildenv lein eastwood '{:exclude-namespaces [apps.protocols :test-paths] :linters [:wrong-arity :wrong-ns-form :wrong-pre-post :wrong-tag :misplaced-docstrings]}' +# Pull the build environment. +docker pull discoenv/buildenv || error_exit 'unable to pull the build environment image' + +# Check for syntax errors. +docker run --rm -v $(pwd):/build -w /build discoenv/buildenv lein eastwood '{:exclude-namespaces [apps.protocols :test-paths] :linters [:wrong-arity :wrong-ns-form :wrong-pre-post :wrong-tag :misplaced-docstrings]}' \ + || error_exit 'lint errors were found' + +# Pull the DE database image. +docker pull discoenv/de-db || error_exit 'unable to pull the DE database image' + +# Start the DE database container. +docker run --name $DBCONTAINER -e POSTGRES_PASSWORD=notprod -d -p 35432:5432 discoenv/de-db \ + || error_exit 'unable to start the DE database container' -docker pull discoenv/de-db -docker run --name $DBCONTAINER -e POSTGRES_PASSWORD=notprod -d -p 35432:5432 discoenv/de-db +# Wait for the DE database container to start up. sleep 10 -docker pull discoenv/de-db-loader:dev -docker run --rm --link $DBCONTAINER:postgres discoenv/de-db-loader:dev -docker run --rm -v $(pwd):/build -w /build --link $DBCONTAINER:postgres discoenv/buildenv lein $CMD + +# Pull the DE database loader. +docker pull discoenv/de-db-loader:dev || error_exit 'unable to pull the DE database loader image' + +# Run the DE database loader. +docker run --rm --link $DBCONTAINER:postgres discoenv/de-db-loader:dev \ + || error_exit 'unable to run the DE database loader' + +# Run the tests. +docker run --rm -v $(pwd):/build -w /build --link $DBCONTAINER:postgres discoenv/buildenv lein $CMD \ + || error_exit 'there were unit test failures' + +# Display a success message. 
+echo "TEST SUCCEEDED" 1>&2 From 0b5f68357243abadaeb392c25b2c5da6fa6a6878 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 17:18:42 -0700 Subject: [PATCH 159/183] CORE-7563: fixed an arity error in a function call --- services/apps/src/apps/service/apps/jobs.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/apps/src/apps/service/apps/jobs.clj b/services/apps/src/apps/service/apps/jobs.clj index e035e8139..e01e76bdf 100644 --- a/services/apps/src/apps/service/apps/jobs.clj +++ b/services/apps/src/apps/service/apps/jobs.clj @@ -47,7 +47,7 @@ (when-not (= prev-status curr-status) (cn/send-job-status-update (.getUser apps-client) - (listings/format-job (.loadAppTables apps-client [app-id]) job))))) + (listings/format-job apps-client (.loadAppTables apps-client [app-id]) job))))) (defn- determine-batch-status [{:keys [id]}] From 0f30e02d1ed9bd60555246c30fc5deb8e6612f97 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Mon, 14 Mar 2016 17:31:08 -0700 Subject: [PATCH 160/183] CORE-7152 integrate services. add filtering logic. add unit tests. --- .../de/analysis/client/AnalysesView.java | 5 + .../presenter/AnalysesPresenterImpl.java | 54 ++-- .../sharing/AnalysisSharingPresenter.java | 258 +++++++++++++++++- .../client/views/AnalysesToolBarImpl.java | 41 +-- .../client/views/AnalysesToolBarImpl.ui.xml | 4 +- .../views/dialogs/AnalysisSharingDialog.java | 19 +- .../sharing/AnalysisSharingViewImpl.java | 1 - .../de/client/models/analysis/Analysis.java | 5 + .../AnalysisSharingAutoBeanFactory.java | 12 +- .../services/AnalysisServiceFacade.java | 6 +- .../impl/AnalysisServiceFacadeImpl.java | 10 +- .../client/views/AnalysesToolBarImplTest.java | 44 ++- 12 files changed, 381 insertions(+), 78 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java index 68d9a3440..30bdf1ceb 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java @@ -3,6 +3,7 @@ import org.iplantc.de.analysis.client.events.HTAnalysisExpandEvent; import org.iplantc.de.analysis.client.events.selection.AnalysisAppSelectedEvent; import org.iplantc.de.analysis.client.events.selection.AnalysisNameSelectedEvent; +import org.iplantc.de.analysis.client.models.AnalysisFilter; import org.iplantc.de.client.models.analysis.Analysis; import org.iplantc.de.theme.base.client.analyses.AnalysesViewDefaultAppearance.AnalysisInfoStyle; @@ -180,6 +181,10 @@ interface Appearance { void getAnalysisStepInfo(Analysis value); void onShareSelected(List selected); + + void setCurrentFilter(AnalysisFilter filter); + + public void loadAnalyses(AnalysisFilter filter); } void filterByAnalysisId(String id, String name); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java index b1896ed6d..1cbec2b7b 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java @@ -6,6 +6,7 @@ import org.iplantc.de.analysis.client.events.selection.AnalysisAppSelectedEvent; import org.iplantc.de.analysis.client.events.selection.AnalysisNameSelectedEvent; import org.iplantc.de.analysis.client.gin.factory.AnalysesViewFactory; import
org.iplantc.de.analysis.client.models.AnalysisFilter; import org.iplantc.de.analysis.client.presenter.proxy.AnalysisRpcProxy; import org.iplantc.de.analysis.client.presenter.sharing.AnalysisSharingPresenter; import org.iplantc.de.analysis.client.views.AnalysisStepsView; @@ -13,7 +14,6 @@ import org.iplantc.de.analysis.client.views.dialogs.AnalysisStepsInfoDialog; import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingView; import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingViewImpl; -import org.iplantc.de.analysis.client.views.widget.AnalysisSearchField; import org.iplantc.de.client.events.EventBus; import org.iplantc.de.client.events.diskResources.OpenFolderEvent; import org.iplantc.de.client.models.analysis.Analysis; @@ -81,7 +81,7 @@ public void onFailure(Throwable caught) { public void onSuccess(String result) { SafeHtml msg = SafeHtmlUtils.fromString(appearance.analysisStopSuccess(ae.getName())); announcer.schedule(new SuccessAnnouncementConfig(msg, true, 3000)); - loadAnalyses(false); + loadAnalyses(currentFilter); } } @@ -188,6 +188,8 @@ public void onSuccess(Void result) { @Inject JsonUtil jsonUtil; + private AnalysisFilter currentFilter; + private SharingPresenter sharingPresenter; private AnalysisSharingView sharingView; @@ -217,7 +219,9 @@ public void onSuccess(Void result) { this.view.addAnalysisNameSelectedEventHandler(this); this.view.addAnalysisAppSelectedEventHandler(this); this.view.addHTAnalysisExpandEventHandler(this); - sharingView = new AnalysisSharingViewImpl(); + + //Set default filter to ALL + currentFilter = AnalysisFilter.ALL; } @Override @@ -243,7 +247,7 @@ public void onFailure(Throwable caught) { @Override public void onSuccess(String arg0) { - loadAnalyses(false); + loadAnalyses(currentFilter); } }); } @@ -281,20 +285,31 @@ public void go(final HasOneWidget container, final List selectedAnalys if (selectedAnalyses != null && !selectedAnalyses.isEmpty()) { handlerFirstLoad = loader.addLoadHandler(new FirstLoadHandler(selectedAnalyses)); } - loadAnalyses(true); + loadAnalyses(AnalysisFilter.ALL); container.setWidget(view); } - void loadAnalyses(boolean resetFilters) { + @Override + public void loadAnalyses(AnalysisFilter filter) { FilterPagingLoadConfig config = loader.getLastLoadConfig(); - if (resetFilters) { - // add only default filter - FilterConfigBean idParentFilter = new FilterConfigBean(); - idParentFilter.setField(AnalysisSearchField.PARENT_ID); - idParentFilter.setValue(""); - config.getFilters().clear(); - config.getFilters().add(idParentFilter); + FilterConfigBean filterCb = new FilterConfigBean(); + config.getFilters().clear(); + switch (filter) { + case ALL: + filterCb.setField("ownership"); + filterCb.setValue("all"); + break; + case SHARED_WITH_ME: + filterCb.setField("ownership"); + filterCb.setValue("theirs"); + break; + + case MY_ANALYSES: + filterCb.setField("ownership"); + filterCb.setValue("mine"); + break; } + config.getFilters().add(filterCb); config.setLimit(200); config.setOffset(0); loader.load(config); @@ -308,17 +323,17 @@ public void goToSelectedAnalysisFolder(final Analysis selectedAnalysis) { @Override public void onRefreshSelected() { - loadAnalyses(false); + loadAnalyses(currentFilter); } @Override public void onShowAllSelected() { - loadAnalyses(true); + loadAnalyses(AnalysisFilter.ALL); } @Override public void onShareSelected(List selected) { - sharingView.setSelectedAnalysis(selected); + sharingView = new AnalysisSharingViewImpl(); sharingPresenter = new AnalysisSharingPresenter(analysisService, 
selected, sharingView, @@ -326,7 +341,12 @@ public void onShareSelected(List selected) { jsonUtil); AnalysisSharingDialog asd = aSharingDialogProvider.get(); asd.setPresenter(sharingPresenter); - asd.show(selected); + asd.show(); + } + + @Override + public void setCurrentFilter(AnalysisFilter filter) { + this.currentFilter = filter; } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java index 42b7c7f60..610b373e8 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/sharing/AnalysisSharingPresenter.java @@ -6,17 +6,34 @@ import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingView; import org.iplantc.de.client.models.analysis.Analysis; +import org.iplantc.de.client.models.analysis.sharing.AnalysisPermission; +import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingAutoBeanFactory; +import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingRequest; +import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingRequestList; +import org.iplantc.de.client.models.analysis.sharing.AnalysisUnsharingRequest; +import org.iplantc.de.client.models.analysis.sharing.AnalysisUnsharingRequestList; +import org.iplantc.de.client.models.analysis.sharing.AnalysisUserPermissions; +import org.iplantc.de.client.models.analysis.sharing.AnalysisUserPermissionsList; +import org.iplantc.de.client.models.collaborators.Collaborator; import org.iplantc.de.client.models.diskResources.PermissionValue; import org.iplantc.de.client.models.sharing.SharedResource; import org.iplantc.de.client.models.sharing.Sharing; +import org.iplantc.de.client.models.sharing.UserPermission; import org.iplantc.de.client.services.AnalysisServiceFacade; import org.iplantc.de.client.sharing.SharingPermissionsPanel; import org.iplantc.de.client.sharing.SharingPresenter; import org.iplantc.de.client.util.JsonUtil; import org.iplantc.de.collaborators.client.util.CollaboratorsUtil; +import org.iplantc.de.commons.client.ErrorHandler; +import org.iplantc.de.commons.client.info.IplantAnnouncer; +import com.google.gwt.core.client.GWT; import com.google.gwt.json.client.JSONObject; +import com.google.gwt.json.client.JSONString; +import com.google.gwt.user.client.rpc.AsyncCallback; import com.google.gwt.user.client.ui.HasOneWidget; +import com.google.web.bindery.autobean.shared.AutoBean; +import com.google.web.bindery.autobean.shared.AutoBeanCodex; import com.sencha.gxt.core.shared.FastMap; @@ -25,14 +42,81 @@ public class AnalysisSharingPresenter implements SharingPresenter { - final AnalysisSharingView view; + + private final class LoadPermissionsCallback implements AsyncCallback { + private final class GetUserInfoCallback implements AsyncCallback> { + private final List usernames; + + private GetUserInfoCallback(List usernames) { + this.usernames = usernames; + } + + @Override + public void onFailure(Throwable caught) { + ErrorHandler.post(caught); + } + + @Override + public void onSuccess(FastMap results) { + sharingMap = new FastMap<>(); + for (String userName : usernames) { + Collaborator user = results.get(userName); + if (user == null) { + user = collaboratorsUtil.getDummyCollaborator(userName); + } + + List shares = new ArrayList<>(); + + sharingMap.put(userName, shares); + + for (JSONObject share : 
sharingList.get(userName)) { + String id = jsonUtil.getString(share, "id"); //$NON-NLS-1$ + String name = jsonUtil.getString(share, "name"); + Sharing sharing = new Sharing(user, buildPermissionFromJson(share), id, name); + shares.add(sharing); + } + } + + permissionsPanel.loadSharingData(sharingMap); + permissionsPanel.unmask(); + } + } + + @Override + public void onFailure(Throwable caught) { + permissionsPanel.unmask(); + ErrorHandler.post(caught); + } + + @Override + public void onSuccess(String result) { + AutoBean abrp = + AutoBeanCodex.decode(shareFactory, AnalysisUserPermissionsList.class, result); + AnalysisUserPermissionsList aPermsList = abrp.as(); + sharingList = new FastMap<>(); + for (AnalysisUserPermissions rup : aPermsList.getResourceUserPermissionsList()) { + String id = rup.getId(); + String appName = rup.getName(); + List upList = rup.getPermissions(); + loadPermissions(id, appName, upList); + } + final List usernames = new ArrayList<>(); + usernames.addAll(sharingList.keySet()); + collaboratorsUtil.getUserInfo(usernames, new GetUserInfoCallback(usernames)); + } + + } + + final AnalysisSharingView sharingView; private final SharingPermissionsPanel permissionsPanel; private final List selectedAnalysis; private Appearance appearance; - private FastMap> dataSharingMap; + private FastMap> sharingMap; private FastMap> sharingList; private final JsonUtil jsonUtil; private final CollaboratorsUtil collaboratorsUtil; + private final AnalysisServiceFacade aService; + private AnalysisSharingAutoBeanFactory shareFactory = GWT.create(AnalysisSharingAutoBeanFactory.class); public AnalysisSharingPresenter(final AnalysisServiceFacade aService, final List selectedAnalysis, @@ -40,16 +124,20 @@ public AnalysisSharingPresenter(final AnalysisServiceFacade aService, final CollaboratorsUtil collaboratorsUtil, final JsonUtil jsonUtil) { - this.view = view; + this.sharingView = view; + this.aService = aService; this.jsonUtil = jsonUtil; this.collaboratorsUtil = collaboratorsUtil; this.selectedAnalysis = selectedAnalysis; + this.appearance = GWT.create(Appearance.class); this.permissionsPanel = new SharingPermissionsPanel(this, getSelectedResourcesAsMap(this.selectedAnalysis)); permissionsPanel.hidePermissionColumn(); permissionsPanel.setExplainPanelVisibility(false); view.setPresenter(this); view.addShareWidget(permissionsPanel.asWidget()); + loadResources(); + loadPermissions(); } private FastMap getSelectedResourcesAsMap(List selectedAnalysis) { @@ -71,20 +159,18 @@ private List buildAppsList(List shareList) { @Override public void go(HasOneWidget container) { - container.setWidget(view.asWidget()); - - } + container.setWidget(sharingView.asWidget()); + } @Override public void loadResources() { - view.setSelectedAnalysis(selectedAnalysis); - - } + sharingView.setSelectedAnalysis(selectedAnalysis); + } @Override public void loadPermissions() { - permissionsPanel.mask(); - // TODO: load perm here + permissionsPanel.mask(); + aService.getPermissions(selectedAnalysis,new LoadPermissionsCallback()); } @@ -93,10 +179,158 @@ public PermissionValue getDefaultPermissions() { return PermissionValue.read; } + private PermissionValue buildPermissionFromJson(JSONObject perm) { + return PermissionValue.valueOf(jsonUtil.getString(perm, "permission")); + } + + private void loadPermissions(String id, String appName, List userPerms) { + for (UserPermission up : userPerms) { + String permVal = up.getPermission(); + String userName = up.getUser(); + JSONObject perm = new JSONObject(); + List shareList = 
sharingList.get(userName); + if (shareList == null) { + shareList = new ArrayList<>(); + sharingList.put(userName, shareList); + } + perm.put("permission", new JSONString(permVal)); + perm.put("id", new JSONString(id)); //$NON-NLS-1$ + perm.put("name", new JSONString(appName)); + shareList.add(perm); + } + + } + @Override public void processRequest() { - // TODO Auto-generated method stub + AnalysisSharingRequestList request = buildSharingRequest(); + AnalysisUnsharingRequestList unshareRequest = buildUnsharingRequest(); + + if (request != null) { + callSharingService(request); + } + + if (unshareRequest != null) { + callUnshareService(unshareRequest); + } + + if (request != null || unshareRequest != null) { + IplantAnnouncer.getInstance().schedule(appearance.sharingCompleteMsg()); + } + + + } + + private AnalysisSharingRequestList buildSharingRequest() { + AutoBean sharingAbList = + AutoBeanCodex.decode(shareFactory, AnalysisSharingRequestList.class, "{}"); + AnalysisSharingRequestList sharingRequestList = sharingAbList.as(); + + FastMap> sharingMap = permissionsPanel.getSharingMap(); + + List requests = new ArrayList<>(); + if (sharingMap != null && sharingMap.size() > 0) { + for (String userName : sharingMap.keySet()) { + AutoBean sharingAb = + AutoBeanCodex.decode(shareFactory, AnalysisSharingRequest.class, "{}"); + AnalysisSharingRequest sharingRequest = sharingAb.as(); + List shareList = sharingMap.get(userName); + sharingRequest.setUser(userName); + sharingRequest.setAnalysisPermissions(buildAnalysisPermissions(shareList)); + requests.add(sharingRequest); + } + + sharingRequestList.setAnalysisSharingRequestList(requests); + return sharingRequestList; + + } else { + return null; + } + + } + + private List buildAnalysisPermissions(List shareList) { + List aPermList = new ArrayList<>(); + for (Sharing s : shareList) { + AutoBeanaPermAb = + AutoBeanCodex.decode(shareFactory, AnalysisPermission.class, "{}"); + AnalysisPermission aPerm = aPermAb.as(); + aPerm.setId(s.getId()); + aPerm.setPermission(getDefaultPermissions().toString()); + aPermList.add(aPerm); + } + return aPermList; + } + + private AnalysisUnsharingRequestList buildUnsharingRequest() { + AutoBean unsharingAbList = + AutoBeanCodex.decode(shareFactory, AnalysisUnsharingRequestList.class, "{}"); + + AnalysisUnsharingRequestList unsharingRequestList = unsharingAbList.as(); + + FastMap> unSharingMap = permissionsPanel.getUnshareList(); + + List requests = new ArrayList<>(); + + if (unSharingMap != null && unSharingMap.size() > 0) { + for (String userName : unSharingMap.keySet()) { + List shareList = unSharingMap.get(userName); + AutoBean unsharingAb = + AutoBeanCodex.decode(shareFactory, AnalysisUnsharingRequest.class, "{}"); + + AnalysisUnsharingRequest unsharingRequest = unsharingAb.as(); + unsharingRequest.setUser(userName); + unsharingRequest.setAnalyses(buildAnalysisList(shareList)); + requests.add(unsharingRequest); + } + unsharingRequestList.setAnalysisUnSharingRequestList(requests); + return unsharingRequestList; + } else { + return null; + } + + } + + private List buildAnalysisList(List shareList) { + List anaList = new ArrayList<>(); + for (Sharing s : shareList) { + anaList.add(s.getId()); + } + + return anaList; + } + + + private void callSharingService(AnalysisSharingRequestList obj) { + aService.shareAnalyses(obj, new AsyncCallback() { + + @Override + public void onFailure(Throwable caught) { + ErrorHandler.post(caught); + + } + + @Override + public void onSuccess(String result) { + // do nothing intentionally + } 
+ }); + } + + private void callUnshareService(AnalysisUnsharingRequestList obj) { + aService.unshareAnalyses(obj, new AsyncCallback() { + + @Override + public void onFailure(Throwable caught) { + ErrorHandler.post(caught); + + } + @Override + public void onSuccess(String result) { + // do nothing + } + }); } } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java index bf9fa51ae..0cf27869e 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java @@ -16,13 +16,14 @@ import org.iplantc.de.analysis.shared.AnalysisModule; import org.iplantc.de.client.models.UserInfo; import org.iplantc.de.client.models.analysis.Analysis; -import org.iplantc.de.client.models.analysis.AnalysisExecutionStatus; import org.iplantc.de.commons.client.ErrorHandler; import org.iplantc.de.commons.client.validators.DiskResourceNameValidator; import org.iplantc.de.commons.client.views.dialogs.IPlantPromptDialog; import com.google.common.base.Preconditions; +import com.google.common.base.Strings; import com.google.gwt.core.client.GWT; +import com.google.gwt.event.dom.client.KeyUpEvent; import com.google.gwt.event.logical.shared.SelectionEvent; import com.google.gwt.event.logical.shared.SelectionHandler; import com.google.gwt.event.logical.shared.ValueChangeEvent; @@ -85,8 +86,10 @@ interface AnalysesToolbarUiBinder extends UiBinder TextButton share_menu; @UiField MenuItem shareCollabMI; - @UiField - MenuItem shareSupportMI; + + //hidden for now... + //@UiField + //MenuItem shareSupportMI; @UiField(provided = true) SimpleComboBox filterCombo; @@ -135,12 +138,14 @@ public void onValueChange(ValueChangeEvent event) { private void onFilterChange(AnalysisFilter af) { switch (af) { case ALL: - onShowAllSelected(); + applyFilter(AnalysisFilter.ALL); break; case SHARED_WITH_ME: + applyFilter(AnalysisFilter.SHARED_WITH_ME); break; case MY_ANALYSES: + applyFilter(AnalysisFilter.MY_ANALYSES); break; } } @@ -223,7 +228,7 @@ public void onSelectionChanged(SelectionChangedEvent event) { deleteMI.setEnabled(deleteEnabled); share_menu.setEnabled(shareEnabled); shareCollabMI.setEnabled(shareEnabled); - shareSupportMI.setEnabled(shareEnabled); + // shareSupportMI.setEnabled(shareEnabled); renameMI.setEnabled(renameEnabled); updateCommentsMI.setEnabled(updateCommentsEnabled); } @@ -238,11 +243,11 @@ private boolean isOwner(List selection) { return true; } - private boolean isSharable(List selection) { + boolean isSharable(List selection) { for (Analysis a : selection) { - if (!(a.getStatus().equals(AnalysisExecutionStatus.COMPLETED.toString()) || a.getStatus() - .equals(AnalysisExecutionStatus.FAILED - .toString()))) { + if ((!(a.getStatus().equals(COMPLETED.toString()) + || a.getStatus().equals(FAILED.toString())) + || !( a.isSharable()))) { return false; } @@ -328,15 +333,15 @@ boolean canDeleteSelection(List selection) { } // -/* @UiHandler("searchField") + @UiHandler("searchField") void searchFieldKeyUp(KeyUpEvent event){ if (Strings.isNullOrEmpty(searchField.getCurrentValue())) { - // disable show all since an empty search field would fire load all. 
- showTb.disable(); + filterCombo.setValue(AnalysisFilter.ALL); } else { - showTb.enable(); + filterCombo.setValue(null); } - }*/ + } + @UiHandler("cancelMI") void onCancelSelected(SelectionEvent event) { @@ -455,9 +460,9 @@ void onRefreshSelected(SelectEvent event) { presenter.onRefreshSelected(); } - void onShowAllSelected() { + void applyFilter(AnalysisFilter filter) { searchField.clear(); - presenter.onShowAllSelected(); + presenter.loadAnalyses(filter); } @UiHandler("shareCollabMI") @@ -465,7 +470,7 @@ void onShareSelected(SelectionEvent event) { presenter.onShareSelected(currentSelection); } - @UiHandler("shareSupportMI") + /** @UiHandler("shareSupportMI") void onShareSupportSelected(SelectionEvent event) { ConfirmMessageBox messageBox = new ConfirmMessageBox(appearance.shareSupport(), appearance.shareSupportConfirm()); @@ -494,6 +499,6 @@ public void onDialogHide(DialogHideEvent event) { messageBox.show(); - } + } **/ } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml index 2463d2206..826729354 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.ui.xml @@ -82,9 +82,9 @@ - + /> --> diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/dialogs/AnalysisSharingDialog.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/dialogs/AnalysisSharingDialog.java index e3c90cd61..37c99f9c4 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/dialogs/AnalysisSharingDialog.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/dialogs/AnalysisSharingDialog.java @@ -1,7 +1,5 @@ package org.iplantc.de.analysis.client.views.dialogs; -import org.iplantc.de.client.models.analysis.Analysis; -import org.iplantc.de.client.services.AnalysisServiceFacade; import org.iplantc.de.client.sharing.SharingPresenter; import org.iplantc.de.client.util.JsonUtil; import org.iplantc.de.collaborators.client.util.CollaboratorsUtil; @@ -13,12 +11,8 @@ import com.sencha.gxt.widget.core.client.event.SelectEvent; import com.sencha.gxt.widget.core.client.event.SelectEvent.SelectHandler; -import java.util.List; - public class AnalysisSharingDialog extends IPlantDialog implements SelectHandler { - private final AnalysisServiceFacade analysisService; - private SharingPresenter sharingPresenter; @Inject @@ -27,9 +21,8 @@ public class AnalysisSharingDialog extends IPlantDialog implements SelectHandler JsonUtil jsonUtil; @Inject - AnalysisSharingDialog(final AnalysisServiceFacade analysisService) { - super(true); - this.analysisService = analysisService; + public AnalysisSharingDialog() { + super(false); setPixelSize(600, 500); setHideOnButtonClick(true); setModal(true); @@ -46,14 +39,10 @@ public void onSelect(SelectEvent event) { sharingPresenter.processRequest(); } - public void show(final List resourcesToShare) { - sharingPresenter.go(this); - super.show(); - } - @Override public void show() throws UnsupportedOperationException { - throw new UnsupportedOperationException("This method is not supported for this class. 
"); + sharingPresenter.go(this); + super.show(); } public void setPresenter(SharingPresenter sharingPresenter) { diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/sharing/AnalysisSharingViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/sharing/AnalysisSharingViewImpl.java index a3d3dad4a..237e1bd6f 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/sharing/AnalysisSharingViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/sharing/AnalysisSharingViewImpl.java @@ -61,7 +61,6 @@ public Widget asWidget() { @Override public void addShareWidget(Widget widget) { container.add(widget); - } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/Analysis.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/Analysis.java index a04d682ef..600f167b9 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/Analysis.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/Analysis.java @@ -81,4 +81,9 @@ public interface Analysis extends HasId, HasName { @PropertyName("username") String getUserName(); + + // key to determine whether share menu is enabled or not + + @PropertyName("can_share") + public boolean isSharable(); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingAutoBeanFactory.java b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingAutoBeanFactory.java index 4a75e5dc8..b873e04df 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingAutoBeanFactory.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/models/analysis/sharing/AnalysisSharingAutoBeanFactory.java @@ -9,7 +9,15 @@ public interface AnalysisSharingAutoBeanFactory extends AutoBeanFactory { - AutoBean resourceUserPermissionsList(); + AutoBean analysisUserPermissionsList(); - AutoBean resourceUserPermissions(); + AutoBean analysisPermission(); + + AutoBean AnalysisSharingRequest(); + + AutoBean AnalysisUnsharingRequest(); + + AutoBean AnalysisSharingRequestList(); + + AutoBean AnalysisUnsharingRequestList(); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java index 16bc6f6db..2b241a377 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/AnalysisServiceFacade.java @@ -4,7 +4,9 @@ import org.iplantc.de.client.models.analysis.AnalysisParameter; import org.iplantc.de.client.models.analysis.AnalysisStepsInfo; import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingRequest; +import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingRequestList; import org.iplantc.de.client.models.analysis.sharing.AnalysisUnsharingRequest; +import org.iplantc.de.client.models.analysis.sharing.AnalysisUnsharingRequestList; import org.iplantc.de.client.models.apps.App; import org.iplantc.de.client.models.apps.sharing.AppSharingRequestList; import org.iplantc.de.client.models.apps.sharing.AppUnSharingRequestList; @@ -63,9 +65,9 @@ public interface AnalysisServiceFacade { */ void getAnalysisSteps(Analysis analysis, AsyncCallback callback); - void shareAnalyses(AnalysisSharingRequest request, AsyncCallback callback); + void shareAnalyses(AnalysisSharingRequestList request, AsyncCallback callback); - void 
unshareAnalyses(AnalysisUnsharingRequest request, AsyncCallback callback); + void unshareAnalyses(AnalysisUnsharingRequestList request, AsyncCallback callback); void getPermissions(List analyses, AsyncCallback callback); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java index 342dc92ca..9a6eb1e18 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/impl/AnalysisServiceFacadeImpl.java @@ -11,8 +11,8 @@ import org.iplantc.de.client.models.analysis.AnalysisParametersList; import org.iplantc.de.client.models.analysis.AnalysisStepsInfo; import org.iplantc.de.client.models.analysis.SimpleValue; -import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingRequest; -import org.iplantc.de.client.models.analysis.sharing.AnalysisUnsharingRequest; +import org.iplantc.de.client.models.analysis.sharing.AnalysisSharingRequestList; +import org.iplantc.de.client.models.analysis.sharing.AnalysisUnsharingRequestList; import org.iplantc.de.client.models.apps.integration.ArgumentType; import org.iplantc.de.client.models.apps.integration.SelectionItem; import org.iplantc.de.client.services.AnalysisServiceFacade; @@ -296,7 +296,7 @@ public void getAnalysisSteps(Analysis analysis, AsyncCallback } @Override - public void shareAnalyses(AnalysisSharingRequest request, AsyncCallback callback) { + public void shareAnalyses(AnalysisSharingRequestList request, AsyncCallback callback) { final String payload = AutoBeanCodex.encode(AutoBeanUtils.getAutoBean(request)).getPayload(); GWT.log("analyis sharing request:" + payload); String address = ANALYSES + "/" + "sharing"; @@ -305,7 +305,7 @@ public void shareAnalyses(AnalysisSharingRequest request, AsyncCallback } @Override - public void unshareAnalyses(AnalysisUnsharingRequest request, AsyncCallback callback) { + public void unshareAnalyses(AnalysisUnsharingRequestList request, AsyncCallback callback) { final String payload = AutoBeanCodex.encode(AutoBeanUtils.getAutoBean(request)).getPayload(); GWT.log("analysis un-sharing request:" + payload); String address = ANALYSES + "/" + "unsharing"; @@ -323,7 +323,7 @@ public void getPermissions(List analyses, AsyncCallback callba item.assign(idArr, idArr.size()); } - idArr.assign(anaObj, "apps"); + idArr.assign(anaObj, "analyses"); String address = ANALYSES + "/" + "permission-lister"; ServiceCallWrapper wrapper = new ServiceCallWrapper(BaseServiceCallWrapper.Type.POST, address, anaObj.getPayload()); deServiceFacade.getServiceData(wrapper, callback); diff --git a/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java index 8bf4092e3..8a32b31c9 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java +++ b/ui/de-lib/src/test/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImplTest.java @@ -63,8 +63,8 @@ public class AnalysesToolBarImplTest { TextButton share_menuMock; @Mock MenuItem shareCollabMIMock; - @Mock - MenuItem shareSupportMIMock; + //@Mock + //MenuItem shareSupportMIMock; private AnalysesToolBarImpl uut; @@ -86,7 +86,7 @@ void mockMenuItems(AnalysesToolBarImpl uut){ uut.currentSelection = currentSelectionMock; uut.share_menu = share_menuMock; uut.shareCollabMI = shareCollabMIMock; 
- uut.shareSupportMI = shareSupportMIMock; + // uut.shareSupportMI = shareSupportMIMock; uut.userInfo = mockUserInfo; } @@ -103,7 +103,7 @@ void mockMenuItems(AnalysesToolBarImpl uut){ verify(updateCommentsMiMock).setEnabled(eq(false)); verify(share_menuMock).setEnabled(eq(false)); verify(shareCollabMIMock).setEnabled(eq(false)); - verify(shareSupportMIMock).setEnabled(eq(false)); + // verify(shareSupportMIMock).setEnabled(eq(false)); } @@ -268,4 +268,40 @@ boolean canDeleteSelection(List selection) { when(mock4.getStatus()).thenReturn(CANCELED.toString()); assertTrue("Selection should be deletable", uut.canDeleteSelection(Lists.newArrayList(mock1, mock2, mock3))); } + + @Test public void testCanShareSelection() { + Analysis mock1 = mock(Analysis.class); + Analysis mock2 = mock(Analysis.class); + + when(mock1.getStatus()).thenReturn(SUBMITTED.toString()); + when(mock2.getStatus()).thenReturn(RUNNING.toString()); + when(mock1.isSharable()).thenReturn(true); + when(mock2.isSharable()).thenReturn(true); + assertFalse("Selection should not be sharable", uut.isSharable(Lists.newArrayList(mock1, mock2))); + + when(mock1.getStatus()).thenReturn(COMPLETED.toString()); + when(mock2.getStatus()).thenReturn(RUNNING.toString()); + when(mock1.isSharable()).thenReturn(true); + when(mock2.isSharable()).thenReturn(true); + assertFalse("Selection should not be sharable", uut.isSharable(Lists.newArrayList(mock1, mock2))); + + when(mock1.getStatus()).thenReturn(COMPLETED.toString()); + when(mock2.getStatus()).thenReturn(FAILED.toString()); + when(mock1.isSharable()).thenReturn(false); + when(mock2.isSharable()).thenReturn(false); + assertFalse("Selection should not be sharable", uut.isSharable(Lists.newArrayList(mock1, mock2))); + + when(mock1.getStatus()).thenReturn(COMPLETED.toString()); + when(mock2.getStatus()).thenReturn(FAILED.toString()); + when(mock1.isSharable()).thenReturn(false); + when(mock2.isSharable()).thenReturn(true); + assertFalse("Selection should not be sharable", uut.isSharable(Lists.newArrayList(mock1, mock2))); + + when(mock1.getStatus()).thenReturn(COMPLETED.toString()); + when(mock2.getStatus()).thenReturn(FAILED.toString()); + when(mock1.isSharable()).thenReturn(true); + when(mock2.isSharable()).thenReturn(true); + assertTrue("Selection should be sharable", uut.isSharable(Lists.newArrayList(mock1, mock2))); + + } } From 122ab97cc42183227f3d4d8b2699e457ae4b6247 Mon Sep 17 00:00:00 2001 From: Ashley Ramsey Date: Mon, 14 Mar 2016 17:39:14 -0700 Subject: [PATCH 161/183] Update docker.version in group_vars/all to 1.10.2 --- ansible/inventories/group_vars/all | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/inventories/group_vars/all b/ansible/inventories/group_vars/all index 799458a68..788d2f776 100644 --- a/ansible/inventories/group_vars/all +++ b/ansible/inventories/group_vars/all @@ -12,7 +12,7 @@ docker: log_driver: syslog tag: latest user: discoenv - version: 1.9.1 + version: 1.10.2 compose_path: /etc/docker-compose.yml registry: host: "{{ groups['docker-registry'][0] }}" From aae9c09bd2516d58d69a6174114e0017922ba7c0 Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Mon, 14 Mar 2016 17:49:32 -0700 Subject: [PATCH 162/183] Fix services/apps/test.sh syntax error. 
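Bash's 'function' keyword and the POSIX 'name()' parentheses are two different ways to
declare a shell function; combining them as 'function error_exit() {' is a non-portable
hybrid that some shells reject, which is presumably the syntax error being fixed here.
This change keeps the keyword and drops the parentheses; patch 164 below completes the
cleanup by settling on the plain POSIX 'error_exit() {' form.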
--- services/apps/test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/apps/test.sh b/services/apps/test.sh index e30ec7b60..b5e796491 100755 --- a/services/apps/test.sh +++ b/services/apps/test.sh @@ -2,7 +2,7 @@ set -e -function error_exit() { +function error_exit { echo echo "TEST FAILED: $@" 1>&2 exit 1 From 97ceabfe96adc5ddc8e9f2161e2eb07b65f7f5ff Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Mon, 14 Mar 2016 17:52:53 -0700 Subject: [PATCH 163/183] CORE-7152 maintain filter during session save and restore. --- .../java/org/iplantc/de/analysis/client/AnalysesView.java | 2 ++ .../de/analysis/client/presenter/AnalysesPresenterImpl.java | 6 ++++++ .../de/analysis/client/views/AnalysesToolBarImpl.java | 3 +-- .../client/views/window/configs/AnalysisWindowConfig.java | 5 +++++ .../de/desktop/client/views/windows/MyAnalysesWindow.java | 2 ++ 5 files changed, 16 insertions(+), 2 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java index 30bdf1ceb..11c79dc34 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java @@ -184,6 +184,8 @@ interface Appearance { void setCurrentFilter(AnalysisFilter filter); + AnalysisFilter getCurrentFilter(); + public void loadAnalyses(AnalysisFilter filter); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java index 1cbec2b7b..406685bd3 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java @@ -347,6 +347,12 @@ public void onShareSelected(List selected) { @Override public void setCurrentFilter(AnalysisFilter filter) { this.currentFilter = filter; + loadAnalyses(currentFilter); + } + + @Override + public AnalysisFilter getCurrentFilter() { + return currentFilter; } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java index 0cf27869e..e49e73e88 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java @@ -461,8 +461,7 @@ void onRefreshSelected(SelectEvent event) { } void applyFilter(AnalysisFilter filter) { - searchField.clear(); - presenter.loadAnalyses(filter); + presenter.setCurrentFilter(filter); } @UiHandler("shareCollabMI") diff --git a/ui/de-lib/src/main/java/org/iplantc/de/commons/client/views/window/configs/AnalysisWindowConfig.java b/ui/de-lib/src/main/java/org/iplantc/de/commons/client/views/window/configs/AnalysisWindowConfig.java index 1713b340b..b49f2e4e5 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/commons/client/views/window/configs/AnalysisWindowConfig.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/commons/client/views/window/configs/AnalysisWindowConfig.java @@ -1,5 +1,6 @@ package org.iplantc.de.commons.client.views.window.configs; +import org.iplantc.de.analysis.client.models.AnalysisFilter; import org.iplantc.de.client.models.analysis.Analysis; import java.util.List; @@ -10,4 +11,8 @@ public interface 
AnalysisWindowConfig extends WindowConfig { void setSelectedAnalyses(List selectedAnalyses); + void setFilter(AnalysisFilter filter); + + AnalysisFilter getFilter(); + } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/MyAnalysesWindow.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/MyAnalysesWindow.java index fc2a44468..aa49b50a4 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/MyAnalysesWindow.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/MyAnalysesWindow.java @@ -47,6 +47,7 @@ public WindowState getWindowState() { List selectedAnalyses = Lists.newArrayList(); selectedAnalyses.addAll(presenter.getSelectedAnalyses()); config.setSelectedAnalyses(selectedAnalyses); + config.setFilter(presenter.getCurrentFilter()); return createWindowState(config); } @@ -57,6 +58,7 @@ public void update(C config) { if (config instanceof AnalysisWindowConfig) { AnalysisWindowConfig analysisWindowConfig = (AnalysisWindowConfig) config; presenter.setSelectedAnalyses(analysisWindowConfig.getSelectedAnalyses()); + presenter.setCurrentFilter(((AnalysisWindowConfig)config).getFilter()); } } From e596edd0f934f6e88b747f6f25bcad06080221b1 Mon Sep 17 00:00:00 2001 From: Paul Sarando Date: Mon, 14 Mar 2016 18:11:45 -0700 Subject: [PATCH 164/183] Fix services/apps/test.sh syntax error. --- services/apps/test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/apps/test.sh b/services/apps/test.sh index b5e796491..18c6011f1 100755 --- a/services/apps/test.sh +++ b/services/apps/test.sh @@ -2,7 +2,7 @@ set -e -function error_exit { +error_exit() { echo echo "TEST FAILED: $@" 1>&2 exit 1 From 0bac978a12a3bce2852884ce46bb39170d21436a Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Mon, 14 Mar 2016 18:43:41 -0700 Subject: [PATCH 165/183] Only deploy templeton to non-parasitic environments. 
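A parasitic environment piggybacks on the backing services of another deployment, so
starting its own templeton-periodic publisher there would presumably process the same
records a second time. The 'when: not parasitic' conditions added to the pull-configs,
pull, and up roles skip those hosts while leaving the templeton-periodic tags available
for full environments.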
--- ansible/playbooks/de-pull-images.yaml | 2 ++ ansible/playbooks/de-start-containers.yml | 1 + 2 files changed, 3 insertions(+) diff --git a/ansible/playbooks/de-pull-images.yaml b/ansible/playbooks/de-pull-images.yaml index 493828179..46ae5eaaa 100644 --- a/ansible/playbooks/de-pull-images.yaml +++ b/ansible/playbooks/de-pull-images.yaml @@ -347,9 +347,11 @@ - templeton-periodic roles: - role: util-cfg-docker-pull-configs + when: not parasitic service_name: "{{templeton_periodic.compose_service}}" service_name_short: "{{templeton_periodic.service_name_short}}" - role: util-cfg-docker-pull + when: not parasitic service_name: "{{templeton_periodic.compose_service}}" service_name_short: "{{templeton_periodic.service_name_short}}" diff --git a/ansible/playbooks/de-start-containers.yml b/ansible/playbooks/de-start-containers.yml index 0096ab9b8..c9b0e4868 100644 --- a/ansible/playbooks/de-start-containers.yml +++ b/ansible/playbooks/de-start-containers.yml @@ -240,6 +240,7 @@ - templeton-periodic roles: - role: util-cfg-docker-up + when: not parasitic service_name: "{{templeton_periodic.compose_service}}" service_name_short: "{{templeton_periodic.service_name_short}}" From eb9a2bca8d8a5a3b49bcd98014353c01a440abfd Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 19:59:36 -0700 Subject: [PATCH 166/183] CORE-7564: added a way to list only analyses submitted by the current user or only those not submitted by the current user --- services/apps/src/apps/persistence/jobs.clj | 78 ++++++++++--------- services/apps/src/apps/routes/params.clj | 10 ++- .../src/apps/service/apps/job_listings.clj | 17 ++-- 3 files changed, 61 insertions(+), 44 deletions(-) diff --git a/services/apps/src/apps/persistence/jobs.clj b/services/apps/src/apps/persistence/jobs.clj index 36b21dc23..2c7564652 100644 --- a/services/apps/src/apps/persistence/jobs.clj +++ b/services/apps/src/apps/persistence/jobs.clj @@ -9,6 +9,7 @@ (:require [cheshire.core :as cheshire] [clojure.set :as set] [clojure.string :as string] + [clojure-commons.exception-util :as cxu] [kameleon.jobs :as kj] [korma.core :as sql])) @@ -86,13 +87,34 @@ [{:keys [field value]}] {(filter-field->where-field field) (filter-value->where-value field value)}) +(defn- apply-standard-filter + "Applies 'standard' filters to a query. Standard filters are filters that search for fields that are + included in the job listing response body." + [query standard-filter] + (if (seq standard-filter) + (where query (apply or (map filter-map->where-clause standard-filter))) + query)) + +(defn- apply-ownership-filter + "Applies an 'ownership' filter to a query. An ownership filter is any filter for which the field is + 'ownership'. Only one ownership filter is supported in a single job query. If multiple ownership + filters are included then the first one wins." + [query username [ownership-filter & _]] + (if ownership-filter + (condp = (:value ownership-filter) + "all" query + "mine" (where query {:j.username username}) + "theirs" (where query {:j.username [not= username]}) + (cxu/bad-request (str "invalid ownership filter value: " (:value ownership-filter)))) + query)) + (defn- add-job-query-filter-clause "Filters results returned by the given job query by adding a (where (or ...)) clause based on the given filter map." - [query filter] - (if (empty? filter) - query - (where query (apply or (map filter-map->where-clause filter))))) + [query username query-filter] + (let [ownership-filter? 
(fn [{:keys [field]}] (= field "ownership"))] + (-> (apply-standard-filter query (remove ownership-filter? query-filter)) + (apply-ownership-filter username (filter ownership-filter? query-filter))))) (defn save-job "Saves information about a job in the database." @@ -167,26 +189,18 @@ (defn- count-jobs-base "The base query for counting the number of jobs in the database for a user." - [username include-hidden] - (-> (select* [:jobs :j]) - (join [:users :u] {:j.user_id :u.id}) + [include-hidden] + (-> (select* [:job_listings :j]) (aggregate (count :*) :count) - (where {:u.username username}) (add-internal-app-clause include-hidden))) -(defn count-jobs - "Counts the number of undeleted jobs in the database for a user." - [username filter include-hidden] - ((comp :count first) - (select (add-job-query-filter-clause (count-jobs-base username include-hidden) filter) - (where {:j.deleted false})))) - (defn count-jobs-of-types "Counts the number of undeleted jobs of the given types in the database for a user." - [username filter include-hidden types] + [username filter include-hidden types accessible-ids] ((comp :count first) - (select (add-job-query-filter-clause (count-jobs-base username include-hidden) filter) + (select (add-job-query-filter-clause (count-jobs-base include-hidden) username filter) (where {:j.deleted false}) + (where {:j.id [in accessible-ids]}) (where (not (exists (job-type-subselect types))))))) (defn- translate-sort-field @@ -256,29 +270,21 @@ (aggregate (max :step_number) :max-step) (where {:job_id job-id})))) -(defn list-jobs - "Gets a list of jobs satisfying a query." - [username row-limit row-offset sort-field sort-order filter include-hidden] - (-> (select* (add-job-query-filter-clause (job-base-query) filter)) - (where {:j.deleted false - :j.username username}) - (add-internal-app-clause include-hidden) - (order (translate-sort-field sort-field) sort-order) - (offset (nil-if-zero row-offset)) - (limit (nil-if-zero row-limit)) - (select))) +(defn- add-order + [query {:keys [sort-field sort-dir]}] + (order query (translate-sort-field sort-field) sort-dir)) (defn list-jobs-of-types "Gets a list of jobs that contain only steps of the given types." - [username row-limit row-offset sort-field sort-order filter include-hidden types] - (-> (select* (add-job-query-filter-clause (job-base-query) filter)) - (where {:j.deleted false - :j.username username}) + [username search-params types accessible-ids] + (-> (select* (add-job-query-filter-clause (job-base-query) username (:filter search-params))) + (where {:j.deleted false}) + (where {:j.id [in accessible-ids]}) (where (not (exists (job-type-subselect types)))) - (add-internal-app-clause include-hidden) - (order (translate-sort-field sort-field) sort-order) - (offset (nil-if-zero row-offset)) - (limit (nil-if-zero row-limit)) + (add-internal-app-clause (:include-hidden search-params)) + (add-order search-params) + (offset (nil-if-zero (:offset search-params))) + (limit (nil-if-zero (:limit search-params))) (select))) (defn list-jobs-by-id diff --git a/services/apps/src/apps/routes/params.clj b/services/apps/src/apps/routes/params.clj index d75da3e9c..154a5783d 100644 --- a/services/apps/src/apps/routes/params.clj +++ b/services/apps/src/apps/routes/params.clj @@ -85,7 +85,15 @@ To find jobs associated with a specific `parent_id`, the parameter value can be `[{\"field\":\"parent_id\",\"value\":\"b4c2f624-7cbd-496e-adad-5be8d0d3b941\"}]`. 
It's also possible to search for jobs without a parent using this parameter value: - `[{\"field\":\"parent_id\",\"value\":null}]`.")})) + `[{\"field\":\"parent_id\",\"value\":null}]`. + The 'ownership' field can be used to specify whether analyses that belong to the authenticated + user or analyses that are shared with the authenticated user should be listed. If the value is + `all` then all analyses that are visible to the user will be listed. If the value is `mine` then + only analyses that were submitted by the user will be listed. If the value is `theirs` then only + analyses that have been shared with the user will be listed. By default, all analyses are listed. + The `ownership` field is the only field for which only one filter value is supported. If multiple + `ownership` field values are specified then the first value specified is used. Here's an example: + `[{\"field\":\"ownership\",\"value\":\"mine\"}]`.")})) (s/defschema ToolSearchParams (merge SecuredPagingParams IncludeHiddenParams diff --git a/services/apps/src/apps/service/apps/job_listings.clj b/services/apps/src/apps/service/apps/job_listings.clj index c6f2114c0..a22fa7925 100644 --- a/services/apps/src/apps/service/apps/job_listings.clj +++ b/services/apps/src/apps/service/apps/job_listings.clj @@ -2,6 +2,7 @@ (:use [kameleon.uuids :only [uuidify]] [apps.util.conversions :only [remove-nil-vals]]) (:require [kameleon.db :as db] + [apps.clients.iplant-groups :as iplant-groups] [apps.persistence.jobs :as jp] [apps.service.apps.jobs.permissions :as job-permissions] [apps.service.util :as util])) @@ -70,23 +71,25 @@ :can_share (and (nil? parent-id) (job-permissions/supports-job-sharing? apps-client id))})) (defn- list-jobs* - [{:keys [username]} {:keys [limit offset sort-field sort-dir filter include-hidden]} types] - (jp/list-jobs-of-types username limit offset sort-field sort-dir filter include-hidden types)) + [{:keys [username]} search-params types analysis-ids] + (jp/list-jobs-of-types username search-params types analysis-ids)) (defn- count-jobs - [{:keys [username]} {:keys [filter include-hidden]} types] - (jp/count-jobs-of-types username filter include-hidden types)) + [{:keys [username]} {:keys [filter include-hidden]} types analysis-ids] + (jp/count-jobs-of-types username filter include-hidden types analysis-ids)) (defn list-jobs [apps-client user {:keys [sort-field] :as params}] - (let [default-sort-dir (if (nil? sort-field) :desc :asc) + (let [perms (iplant-groups/load-analysis-permissions (:shortUsername user)) + analysis-ids (set (keys perms)) + default-sort-dir (if (nil? 
sort-field) :desc :asc) search-params (util/default-search-params params :startdate default-sort-dir) types (.getJobTypes apps-client) - jobs (list-jobs* user search-params types) + jobs (list-jobs* user search-params types analysis-ids) app-tables (.loadAppTables apps-client (map :app-id jobs))] {:analyses (map (partial format-job apps-client app-tables) jobs) :timestamp (str (System/currentTimeMillis)) - :total (count-jobs user params types)})) + :total (count-jobs user params types analysis-ids)})) (defn list-job [apps-client job-id] From ca3add2cedd896162bd23c79c415585b4ce2ff35 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 20:24:21 -0700 Subject: [PATCH 167/183] CORE-7564: register individual jobs within an HT batch as well --- services/apps/src/apps/service/apps/jobs/submissions.clj | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/services/apps/src/apps/service/apps/jobs/submissions.clj b/services/apps/src/apps/service/apps/jobs/submissions.clj index b5293ec95..37e04d352 100644 --- a/services/apps/src/apps/service/apps/jobs/submissions.clj +++ b/services/apps/src/apps/service/apps/jobs/submissions.clj @@ -8,6 +8,7 @@ [clojure-commons.file-utils :as ft] [kameleon.db :as db] [apps.clients.data-info :as data-info] + [apps.clients.iplant-groups :as iplant-groups] [apps.persistence.app-metadata :as ap] [apps.persistence.jobs :as jp] [apps.service.apps.job-listings :as job-listings] @@ -205,9 +206,11 @@ :output_dir (ft/path-join (:output_dir submission) job-suffix)))) (defn- submit-job-in-batch - [apps-client submission paths-exist job-number path-map] + [apps-client user submission paths-exist job-number path-map] (when (every? (partial get paths-exist) (map keyword (vals path-map))) - (.submitJob apps-client (format-submission-in-batch submission job-number path-map)))) + (let [job-info (.submitJob apps-client (format-submission-in-batch submission job-number path-map))] + (iplant-groups/register-analysis (:shortUsername user) (:id job-info)) + job-info))) (defn- preprocess-batch-submission [submission output-dir parent-id] @@ -228,7 +231,7 @@ output-dir (get-batch-output-dir user submission) batch-id (save-batch user job-types app submission output-dir) submission (preprocess-batch-submission submission output-dir batch-id)] - (dorun (map-indexed (partial submit-job-in-batch apps-client submission paths-exist) path-maps)) + (dorun (map-indexed (partial submit-job-in-batch apps-client user submission paths-exist) path-maps)) (-> (job-listings/list-job apps-client batch-id) (assoc :missing-paths (extract-missing-paths paths-exist)) remove-nil-values))) From 366effd9deaeb0128a91560242e0b22209dd937a Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Mon, 14 Mar 2016 20:25:34 -0700 Subject: [PATCH 168/183] CORE-7564: also share child analyses when sharing an analysis --- services/apps/src/apps/service/apps/jobs/sharing.clj | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/services/apps/src/apps/service/apps/jobs/sharing.clj b/services/apps/src/apps/service/apps/jobs/sharing.clj index 4ae5d6477..c9e04c6bc 100644 --- a/services/apps/src/apps/service/apps/jobs/sharing.clj +++ b/services/apps/src/apps/service/apps/jobs/sharing.clj @@ -102,6 +102,10 @@ (catch ce/clj-http-error? {:keys [body]} (str "unable to share result folder: " (:error_code (service/parse-json body)))))) +(defn- process-child-jobs + [f job-id] + (first (remove nil? 
(map f (jp/list-child-jobs job-id))))) + (defn- share-job* [apps-client sharer sharee job-id job level] (or (verify-not-subjob job) @@ -109,7 +113,8 @@ (verify-support apps-client job-id) (share-app-for-job apps-client sharer sharee job-id job) (share-output-folder sharer sharee job) - (iplant-groups/share-analysis job-id sharee level))) + (iplant-groups/share-analysis job-id sharee level) + (process-child-jobs #(iplant-groups/share-analysis (:id %) sharee level) job-id))) (defn- share-job [apps-client sharer sharee {job-id :analysis_id level :permission}] @@ -149,7 +154,8 @@ (verify-accessible sharer job-id) (verify-support apps-client job-id) (unshare-output-folder sharer sharee job) - (iplant-groups/unshare-analysis job-id sharee))) + (iplant-groups/unshare-analysis job-id sharee) + (process-child-jobs #(iplant-groups/unshare-analysis (:id %) sharee) job-id))) (defn- unshare-job [apps-client sharer sharee job-id] From 337c2f7d7f000874235e91daf03d7e48415038e5 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 15 Mar 2016 09:06:39 -0700 Subject: [PATCH 169/183] CORE-7152 add filter for collapsing ht jobs in analyses window default view . --- .../analysis/client/presenter/AnalysesPresenterImpl.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java index 406685bd3..831fad8a0 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java @@ -14,6 +14,7 @@ import org.iplantc.de.analysis.client.views.dialogs.AnalysisStepsInfoDialog; import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingView; import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingViewImpl; +import org.iplantc.de.analysis.client.views.widget.AnalysisSearchField; import org.iplantc.de.client.events.EventBus; import org.iplantc.de.client.events.diskResources.OpenFolderEvent; import org.iplantc.de.client.models.analysis.Analysis; @@ -292,6 +293,11 @@ public void go(final HasOneWidget container, final List selectedAnalys @Override public void loadAnalyses(AnalysisFilter filter) { FilterPagingLoadConfig config = loader.getLastLoadConfig(); + + FilterConfigBean idParentFilter = new FilterConfigBean(); + idParentFilter.setField(AnalysisSearchField.PARENT_ID); + idParentFilter.setValue(""); + FilterConfigBean filterCb = new FilterConfigBean(); config.getFilters().clear(); switch (filter) { @@ -309,6 +315,7 @@ public void loadAnalyses(AnalysisFilter filter) { filterCb.setValue("mine"); break; } + config.getFilters().add(idParentFilter); config.getFilters().add(filterCb); config.setLimit(200); config.setOffset(0); From 84795c475ec2f80758fe513e0955d6565ec0a896 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 15 Mar 2016 09:24:49 -0700 Subject: [PATCH 170/183] fix test for apps toolbar --- .../toolBar/AppsViewToolbarImplTest.java | 67 +++++++++---------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/ui/de-lib/src/test/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImplTest.java b/ui/de-lib/src/test/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImplTest.java index 2ddb19b20..1a6ad71e8 100644 --- a/ui/de-lib/src/test/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImplTest.java +++ 
b/ui/de-lib/src/test/java/org/iplantc/de/apps/client/views/toolBar/AppsViewToolbarImplTest.java @@ -29,7 +29,6 @@ import com.sencha.gxt.data.shared.loader.PagingLoadResult; import com.sencha.gxt.data.shared.loader.PagingLoader; import com.sencha.gxt.widget.core.client.button.TextButton; -import com.sencha.gxt.widget.core.client.menu.Menu; import com.sencha.gxt.widget.core.client.menu.MenuItem; import org.junit.Before; @@ -47,7 +46,7 @@ @RunWith(GxtMockitoTestRunner.class) public class AppsViewToolbarImplTest { @Mock - Menu mockSharingMenu; + TextButton mockshare_menu; @Mock MenuItem mockShareCollab, mockSharePublic; @Mock AppAutoBeanFactory mockAppFactory; @@ -101,7 +100,7 @@ void setupMocks(AppsViewToolbarImpl uut) { uut.editWf = mockEditWf; uut.requestTool = mockRequestTool; uut.wfRun = mockWfRun; - uut.sharingMenu = mockSharingMenu; + uut.share_menu = mockshare_menu; uut.shareCollab = mockShareCollab; uut.sharePublic = mockSharePublic; @@ -136,7 +135,7 @@ public void emptySelection_onAppCategorySelectionChanged() { mockEditWf, mockRequestTool, mockWfRun, - mockSharingMenu, + mockshare_menu, mockShareCollab, mockSharePublic); } @@ -166,7 +165,7 @@ public void nonEmptySelection_onAppCategorySelectionChanged() { mockEditWf, mockRequestTool, mockWfRun, - mockSharingMenu, + mockshare_menu, mockSharePublic, mockShareCollab); } @@ -194,7 +193,7 @@ public void emptySelection_onAppSelectionChanged() { verify(mockEditWf).setEnabled(false); verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockSharePublic).setEnabled(false); verify(mockShareCollab).setEnabled(false); @@ -212,7 +211,7 @@ public void emptySelection_onAppSelectionChanged() { mockEditWf, mockRequestTool, mockWfRun, - mockSharingMenu, + mockshare_menu, mockShareCollab, mockSharePublic); @@ -252,7 +251,7 @@ public void singleAppSelection_onAppSelectionChanged() { verify(mockEditWf).setEnabled(false); verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); verify(mockSharePublic).setEnabled(false); @@ -272,7 +271,7 @@ public void singleAppSelection_onAppSelectionChanged() { mockWfRun, mockSharePublic, mockShareCollab, - mockSharingMenu); + mockshare_menu); verifyZeroInteractions(appSearchMock); } @@ -309,7 +308,7 @@ public void singleWfSelection_onAppSelectionChanged() { verify(mockEditWf).setEnabled(false); verify(mockCopyWf).setEnabled(true); verify(mockWfRun).setEnabled(true); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); verify(mockSharePublic).setEnabled(false); @@ -369,7 +368,7 @@ public void singleAppSelection_public_onAppSelectionChanged() { verify(mockEditWf).setEnabled(false); verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); verify(mockSharePublic).setEnabled(false); @@ -429,7 +428,7 @@ public void singleWfSelection_public_onAppSelectionChanged() { verify(mockEditWf).setEnabled(false); verify(mockCopyWf).setEnabled(true); verify(mockWfRun).setEnabled(true); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); 
verify(mockSharePublic).setEnabled(false); @@ -485,7 +484,7 @@ public void singleAppSelection_owner_onAppSelectionChanged() { verify(mockEditWf).setEnabled(false); verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); - verify(mockSharingMenu).setEnabled(true); + verify(mockshare_menu).setEnabled(true); verify(mockShareCollab).setEnabled(true); verify(mockSharePublic).setEnabled(true); @@ -503,7 +502,7 @@ public void singleAppSelection_owner_onAppSelectionChanged() { mockEditWf, mockRequestTool, mockWfRun, - mockSharingMenu, + mockshare_menu, mockShareCollab, mockSharePublic); verifyZeroInteractions(appSearchMock); @@ -545,7 +544,7 @@ public void singleWfSelection_owner_onAppSelectionChanged() { verify(mockEditWf).setEnabled(true); verify(mockCopyWf).setEnabled(true); verify(mockWfRun).setEnabled(true); - verify(mockSharingMenu).setEnabled(true); + verify(mockshare_menu).setEnabled(true); verify(mockShareCollab).setEnabled(true); verify(mockSharePublic).setEnabled(true); @@ -564,7 +563,7 @@ public void singleWfSelection_owner_onAppSelectionChanged() { mockRequestTool, mockWfRun, mockSharePublic, - mockSharingMenu, + mockshare_menu, mockShareCollab); verifyZeroInteractions(appSearchMock); } @@ -606,7 +605,7 @@ public void singleAppSelection_ownerRunnable_onAppSelectionChanged() { verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); verify(mockSharePublic).setEnabled(true); - verify(mockSharingMenu).setEnabled(true); + verify(mockshare_menu).setEnabled(true); verify(mockShareCollab).setEnabled(true); verifyNoMoreInteractions(mockAppMenu, @@ -625,7 +624,7 @@ public void singleAppSelection_ownerRunnable_onAppSelectionChanged() { mockWfRun, mockSharePublic, mockShareCollab, - mockSharingMenu); + mockshare_menu); verifyZeroInteractions(appSearchMock); } @@ -666,7 +665,7 @@ public void singleWfSelection_ownerRunnable_onAppSelectionChanged() { verify(mockCopyWf).setEnabled(true); verify(mockWfRun).setEnabled(true); verify(mockSharePublic).setEnabled(true); - verify(mockSharingMenu).setEnabled(true); + verify(mockshare_menu).setEnabled(true); verify(mockShareCollab).setEnabled(true); verifyNoMoreInteractions(mockAppMenu, @@ -685,7 +684,7 @@ public void singleWfSelection_ownerRunnable_onAppSelectionChanged() { mockWfRun, mockShareCollab, mockSharePublic, - mockSharingMenu); + mockshare_menu); verifyZeroInteractions(appSearchMock); } @@ -727,7 +726,7 @@ public void singleAppSelection_ownerRunnablePublic_onAppSelectionChanged() { verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); verify(mockSharePublic).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); @@ -746,7 +745,7 @@ public void singleAppSelection_ownerRunnablePublic_onAppSelectionChanged() { mockRequestTool, mockWfRun, mockSharePublic, - mockSharingMenu, + mockshare_menu, mockShareCollab); verifyZeroInteractions(appSearchMock); } @@ -791,7 +790,7 @@ public void singleWfSelection_ownerRunnablePublic_onAppSelectionChanged() { verify(mockCopyWf).setEnabled(true); verify(mockWfRun).setEnabled(true); verify(mockSharePublic).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); verifyNoMoreInteractions(mockAppMenu, @@ -874,7 +873,7 @@ boolean allAppsPrivate(List apps) { verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); verify(mockSharePublic).setEnabled(false); - 
verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); verifyNoMoreInteractions(mockAppMenu, @@ -893,7 +892,7 @@ boolean allAppsPrivate(List apps) { mockWfRun, mockSharePublic, mockShareCollab, - mockSharingMenu); + mockshare_menu); verifyZeroInteractions(appSearchMock); } @@ -957,7 +956,7 @@ boolean allAppsPrivate(List apps) { verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); verify(mockSharePublic).setEnabled(false); - verify(mockSharingMenu).setEnabled(true); + verify(mockshare_menu).setEnabled(true); verify(mockShareCollab).setEnabled(true); verifyNoMoreInteractions(mockAppMenu, @@ -976,7 +975,7 @@ boolean allAppsPrivate(List apps) { mockWfRun, mockShareCollab, mockSharePublic, - mockSharingMenu); + mockshare_menu); verifyZeroInteractions(appSearchMock); } @@ -1039,7 +1038,7 @@ boolean allAppsPrivate(List apps) { verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); verify(mockSharePublic).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); verifyNoMoreInteractions(mockAppMenu, @@ -1058,7 +1057,7 @@ boolean allAppsPrivate(List apps) { mockWfRun, mockShareCollab, mockSharePublic, - mockSharingMenu); + mockshare_menu); verifyZeroInteractions(appSearchMock); } @@ -1124,7 +1123,7 @@ boolean allAppsPrivate(List apps) { verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); verify(mockSharePublic).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); @@ -1143,7 +1142,7 @@ boolean allAppsPrivate(List apps) { mockRequestTool, mockWfRun, mockSharePublic, - mockSharingMenu, + mockshare_menu, mockShareCollab); verifyZeroInteractions(appSearchMock); } @@ -1183,7 +1182,7 @@ public void singleAppSelectionWithWrite_onAppSelectionChanged() { verify(mockEditWf).setEnabled(false); verify(mockCopyWf).setEnabled(false); verify(mockWfRun).setEnabled(false); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); verify(mockSharePublic).setEnabled(false); @@ -1203,7 +1202,7 @@ public void singleAppSelectionWithWrite_onAppSelectionChanged() { mockWfRun, mockSharePublic, mockShareCollab, - mockSharingMenu); + mockshare_menu); verifyZeroInteractions(appSearchMock); } @@ -1242,7 +1241,7 @@ public void singleWfSelectionWithWrite_onAppSelectionChanged() { verify(mockEditWf).setEnabled(true); verify(mockCopyWf).setEnabled(true); verify(mockWfRun).setEnabled(true); - verify(mockSharingMenu).setEnabled(false); + verify(mockshare_menu).setEnabled(false); verify(mockShareCollab).setEnabled(false); verify(mockSharePublic).setEnabled(false); From cb6fc2173d9960b48342fc903ef9e823cfe4db69 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Tue, 15 Mar 2016 16:19:51 -0700 Subject: [PATCH 171/183] CORE-7152 fix filtering logic and session restore. 
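Filter changes now take one of two paths. setCurrentFilter() is the toolbar path: it
records the new value and reloads the grid only when the value actually changed, and a
null value (set when the search field or an analysis-id filter takes over the combo) is
remembered without triggering a load. setFilterInView() is the session-restore path: it
only updates the combo box, so restoring a saved window does not fire a redundant
request. A standalone sketch of the presenter-side logic, for illustration only: the
method names follow the diff, while the FilterState class, the loads counter, and the
main() walkthrough are stand-ins for the GWT presenter and loader.

    enum AnalysisFilter { ALL, MY_ANALYSES, SHARED_WITH_ME }

    class FilterState {
        private AnalysisFilter currentFilter;
        private int loads; // stands in for loader.load(config) inside loadAnalyses()

        // Toolbar path: reload only on a real change; null means a search or
        // analysis-id filter cleared the combo, so remember that without reloading.
        void setCurrentFilter(AnalysisFilter filter) {
            if (filter == null) {
                currentFilter = null;
            } else if (!filter.equals(currentFilter)) {
                currentFilter = filter;
                loads++; // loadAnalyses(currentFilter)
            }
        }

        AnalysisFilter getCurrentFilter() { return currentFilter; }

        public static void main(String[] args) {
            FilterState p = new FilterState();
            p.setCurrentFilter(AnalysisFilter.ALL); // first load       -> loads == 1
            p.setCurrentFilter(AnalysisFilter.ALL); // unchanged value  -> still 1
            p.setCurrentFilter(null);               // search took over -> no load
            p.setCurrentFilter(AnalysisFilter.ALL); // changed back     -> loads == 2
            System.out.println(p.loads + " load(s), filter=" + p.getCurrentFilter());
        }
    }

Session restore then goes through the view-only path, as in MyAnalysesWindow below:
presenter.setFilterInView(config.getFilter()) updates the combo without re-issuing the
load that setCurrentFilter() would trigger.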
--- .../de/analysis/client/AnalysesView.java | 7 +++++- .../analysis/client/AnalysisToolBarView.java | 3 +++ .../presenter/AnalysesPresenterImpl.java | 25 +++++++++++-------- .../client/views/AnalysesToolBarImpl.java | 8 ++++++ .../client/views/AnalysesViewImpl.java | 6 +++++ .../views/windows/MyAnalysesWindow.java | 2 +- 6 files changed, 38 insertions(+), 13 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java index 11c79dc34..2b9e60003 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysesView.java @@ -186,7 +186,9 @@ interface Appearance { AnalysisFilter getCurrentFilter(); - public void loadAnalyses(AnalysisFilter filter); + void loadAnalyses(AnalysisFilter filter); + + void setFilterInView(AnalysisFilter filter); } void filterByAnalysisId(String id, String name); @@ -197,4 +199,7 @@ interface Appearance { void setSelectedAnalyses(List selectedAnalyses); + void setFilterInView(AnalysisFilter filter); + + } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysisToolBarView.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysisToolBarView.java index e6f433cce..07beafa8f 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysisToolBarView.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/AnalysisToolBarView.java @@ -1,5 +1,6 @@ package org.iplantc.de.analysis.client; +import org.iplantc.de.analysis.client.models.AnalysisFilter; import org.iplantc.de.client.models.analysis.Analysis; import com.google.gwt.user.client.ui.IsWidget; @@ -15,4 +16,6 @@ public interface AnalysisToolBarView extends IsWidget, void filterByAnalysisId(String analysisId, String name); void filterByParentAnalysisId(String id); + + void setFilterInView(AnalysisFilter filter); } diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java index 831fad8a0..fd1306d7e 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/presenter/AnalysesPresenterImpl.java @@ -12,7 +12,6 @@ import org.iplantc.de.analysis.client.views.AnalysisStepsView; import org.iplantc.de.analysis.client.views.dialogs.AnalysisSharingDialog; import org.iplantc.de.analysis.client.views.dialogs.AnalysisStepsInfoDialog; -import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingView; import org.iplantc.de.analysis.client.views.sharing.AnalysisSharingViewImpl; import org.iplantc.de.analysis.client.views.widget.AnalysisSearchField; import org.iplantc.de.client.events.EventBus; @@ -20,7 +19,6 @@ import org.iplantc.de.client.models.analysis.Analysis; import org.iplantc.de.client.models.analysis.AnalysisStepsInfo; import org.iplantc.de.client.services.AnalysisServiceFacade; -import org.iplantc.de.client.sharing.SharingPresenter; import org.iplantc.de.client.util.JsonUtil; import org.iplantc.de.collaborators.client.util.CollaboratorsUtil; import org.iplantc.de.commons.client.ErrorHandler; @@ -191,11 +189,6 @@ public void onSuccess(Void result) { private AnalysisFilter currentFilter; - private SharingPresenter sharingPresenter; - private AnalysisSharingView sharingView; - - - private final ListStore listStore; private 
final AnalysesView view; @@ -322,6 +315,11 @@ public void loadAnalyses(AnalysisFilter filter) { loader.load(config); } + @Override + public void setFilterInView(AnalysisFilter filter) { + view.setFilterInView(filter); + } + @Override public void goToSelectedAnalysisFolder(final Analysis selectedAnalysis) { // Request disk resource window @@ -340,8 +338,8 @@ public void onShowAllSelected() { @Override public void onShareSelected(List selected) { - sharingView = new AnalysisSharingViewImpl(); - sharingPresenter = new AnalysisSharingPresenter(analysisService, + AnalysisSharingViewImpl sharingView = new AnalysisSharingViewImpl(); + AnalysisSharingPresenter sharingPresenter = new AnalysisSharingPresenter(analysisService, selected, sharingView, collaboratorsUtil, @@ -353,8 +351,13 @@ public void onShareSelected(List selected) { @Override public void setCurrentFilter(AnalysisFilter filter) { - this.currentFilter = filter; - loadAnalyses(currentFilter); + if(filter == null) { + currentFilter =filter; + return; + } else if(!(filter.equals(this.currentFilter))) { + currentFilter = filter; + loadAnalyses(currentFilter); + } } @Override diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java index e49e73e88..f8a24f8b0 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java @@ -160,6 +160,7 @@ public void filterByAnalysisId(String analysisId, String name) { searchField.filterByAnalysisId(analysisId, name); //reset filter. Users need to set Filter to ALL to go back... filterCombo.setValue(null); + presenter.setCurrentFilter(null); } @Override @@ -167,6 +168,7 @@ public void filterByParentAnalysisId(String analysisId) { searchField.filterByParentId(analysisId); //reset filter. Users need to set Filter to ALL to go back... 
filterCombo.setValue(null); + presenter.setCurrentFilter(null); } @Override @@ -339,6 +341,7 @@ void searchFieldKeyUp(KeyUpEvent event){ filterCombo.setValue(AnalysisFilter.ALL); } else { filterCombo.setValue(null); + presenter.setCurrentFilter(null); } } @@ -464,6 +467,11 @@ void applyFilter(AnalysisFilter filter) { presenter.setCurrentFilter(filter); } + @Override + public void setFilterInView(AnalysisFilter filter) { + filterCombo.setValue(filter); + } + @UiHandler("shareCollabMI") void onShareSelected(SelectionEvent event) { presenter.onShareSelected(currentSelection); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesViewImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesViewImpl.java index ce9c73e84..7eb739952 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesViewImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesViewImpl.java @@ -7,6 +7,7 @@ import org.iplantc.de.analysis.client.events.selection.AnalysisCommentSelectedEvent; import org.iplantc.de.analysis.client.events.selection.AnalysisNameSelectedEvent; import org.iplantc.de.analysis.client.gin.factory.AnalysisToolBarFactory; +import org.iplantc.de.analysis.client.models.AnalysisFilter; import org.iplantc.de.analysis.client.views.dialogs.AnalysisCommentsDialog; import org.iplantc.de.analysis.shared.AnalysisModule; import org.iplantc.de.client.models.analysis.Analysis; @@ -162,6 +163,11 @@ public void setSelectedAnalyses(List selectedAnalyses) { } } + @Override + public void setFilterInView(AnalysisFilter filter) { + toolBar.setFilterInView(filter); + } + @Override protected void onEnsureDebugId(String baseID) { super.onEnsureDebugId(baseID); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/MyAnalysesWindow.java b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/MyAnalysesWindow.java index aa49b50a4..1516869b0 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/MyAnalysesWindow.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/desktop/client/views/windows/MyAnalysesWindow.java @@ -58,7 +58,7 @@ public void update(C config) { if (config instanceof AnalysisWindowConfig) { AnalysisWindowConfig analysisWindowConfig = (AnalysisWindowConfig) config; presenter.setSelectedAnalyses(analysisWindowConfig.getSelectedAnalyses()); - presenter.setCurrentFilter(((AnalysisWindowConfig)config).getFilter()); + presenter.setFilterInView(((AnalysisWindowConfig)config).getFilter()); } } From 8fa8f8ca9ccba20476709ff47aeac71a192c5c19 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 16 Mar 2016 11:23:15 -0700 Subject: [PATCH 172/183] CORE-7565: move the lein eastwood configuration into project.clj --- services/apps/project.clj | 2 ++ services/apps/test.sh | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/services/apps/project.clj b/services/apps/project.clj index 511622254..89ff80cc9 100644 --- a/services/apps/project.clj +++ b/services/apps/project.clj @@ -31,6 +31,8 @@ [org.iplantc/service-logging "5.2.5.0"] [me.raynes/fs "1.4.6"] [mvxcvi/clj-pgp "0.8.0"]] + :eastwood {:exclude-namespaces [apps.protocols :test-paths] + :linters [:wrong-arity :wrong-ns-form :wrong-pre-post :wrong-tag :misplaced-docstrings]} :plugins [[lein-ring "0.9.6"] [lein-swank "1.4.4"] [test2junit "1.1.3"] diff --git a/services/apps/test.sh b/services/apps/test.sh index 18c6011f1..0f707d0c0 100755 --- a/services/apps/test.sh +++ 
b/services/apps/test.sh @@ -3,7 +3,7 @@ set -e error_exit() { - echo + echo 1>&2 echo "TEST FAILED: $@" 1>&2 exit 1 } @@ -29,7 +29,7 @@ fi docker pull discoenv/buildenv || error_exit 'unable to pull the build environment image' # Check for syntax errors. -docker run --rm -v $(pwd):/build -w /build discoenv/buildenv lein eastwood '{:exclude-namespaces [apps.protocols :test-paths] :linters [:wrong-arity :wrong-ns-form :wrong-pre-post :wrong-tag :misplaced-docstrings]}' \ +docker run --rm -v $(pwd):/build -w /build discoenv/buildenv lein eastwood \ || error_exit 'lint errors were found' # Pull the DE database image. @@ -54,4 +54,5 @@ docker run --rm -v $(pwd):/build -w /build --link $DBCONTAINER:postgres discoenv || error_exit 'there were unit test failures' # Display a success message. +echo 1>&2 echo "TEST SUCCEEDED" 1>&2 From aeede0a633748c57ec2da12fe82ebde8445c84d6 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 16 Mar 2016 12:28:06 -0700 Subject: [PATCH 173/183] CORE-7409: notificationagent -> notification-agent --- docker/images.txt | 2 +- docker/manifest-images.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/images.txt b/docker/images.txt index dc5e8ccac..8b5f0338b 100644 --- a/docker/images.txt +++ b/docker/images.txt @@ -17,7 +17,7 @@ kifshare apps metadata monkey -notificationagent +notification-agent porklock saved-searches sharkbait diff --git a/docker/manifest-images.txt b/docker/manifest-images.txt index 36a6946f2..d27a8f1b1 100644 --- a/docker/manifest-images.txt +++ b/docker/manifest-images.txt @@ -13,7 +13,7 @@ kifshare apps metadata monkey -notificationagent +notification-agent porklock saved-searches templeton From f395a565205689b0a4e1b6e1fcbe0c729ee0ff6f Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 16 Mar 2016 14:15:03 -0700 Subject: [PATCH 174/183] CORE-7565: also share input files when sharing an analysis --- libs/mescal/src/mescal/agave_de_v2.clj | 4 +++ libs/mescal/src/mescal/de.clj | 3 ++ services/apps/src/apps/persistence/jobs.clj | 1 + services/apps/src/apps/protocols.clj | 1 + services/apps/src/apps/service/apps/agave.clj | 4 +++ .../apps/src/apps/service/apps/combined.clj | 5 ++++ services/apps/src/apps/service/apps/de.clj | 4 +++ .../src/apps/service/apps/de/listings.clj | 7 +++++ .../src/apps/service/apps/jobs/params.clj | 6 +++- .../src/apps/service/apps/jobs/sharing.clj | 29 ++++++++++++++++++- services/terrain/src/terrain/util/jwt.clj | 4 +-- 11 files changed, 64 insertions(+), 4 deletions(-) diff --git a/libs/mescal/src/mescal/agave_de_v2.clj b/libs/mescal/src/mescal/agave_de_v2.clj index c684f7c40..87d28b29c 100644 --- a/libs/mescal/src/mescal/agave_de_v2.clj +++ b/libs/mescal/src/mescal/agave_de_v2.clj @@ -51,6 +51,10 @@ [agave app-id] {:tools [(apps/format-tool-for-app (.getApp agave app-id))]}) +(defn get-app-input-ids + [agave app-id] + (mapv :id (:inputs (.getApp agave app-id)))) + (defn prepare-job-submission [agave submission] (jobs/prepare-submission agave (.getApp agave (:app_id submission)) submission)) diff --git a/libs/mescal/src/mescal/de.clj b/libs/mescal/src/mescal/de.clj index f5f2be9fa..36281da04 100644 --- a/libs/mescal/src/mescal/de.clj +++ b/libs/mescal/src/mescal/de.clj @@ -11,6 +11,7 @@ (getAppDetails [_ app-id]) (listAppTasks [_ app-id]) (getAppToolListing [_ app-id]) + (getAppInputIds [_ app-id]) (submitJob [_ submission]) (prepareJobSubmission [_ submission]) (sendJobSubmission [_ submission]) @@ -42,6 +43,8 @@ (v2/list-app-tasks agave app-id)) (getAppToolListing [_ app-id] 
(v2/get-app-tool-listing agave app-id)) + (getAppInputIds [_ app-id] + (v2/get-app-input-ids agave app-id)) (submitJob [this submission] (->> (.prepareJobSubmission this submission) (.sendJobSubmission this))) diff --git a/services/apps/src/apps/persistence/jobs.clj b/services/apps/src/apps/persistence/jobs.clj index 2c7564652..45a3e3bb2 100644 --- a/services/apps/src/apps/persistence/jobs.clj +++ b/services/apps/src/apps/persistence/jobs.clj @@ -298,6 +298,7 @@ "Lists the child jobs within a batch job." [batch-id] (select (job-base-query) + (fields :submission) (where {:parent_id batch-id}))) (defn- add-job-type-clause diff --git a/services/apps/src/apps/protocols.clj b/services/apps/src/apps/protocols.clj index 6b44cd826..e7aefee99 100644 --- a/services/apps/src/apps/protocols.clj +++ b/services/apps/src/apps/protocols.clj @@ -30,6 +30,7 @@ (getAppTaskListing [_ app-id]) (getAppToolListing [_ app-id]) (getAppUi [_ app-id]) + (getAppInputIds [_ app-id]) (addPipeline [_ pipeline]) (formatPipelineTasks [_ pipeline]) (updatePipeline [_ pipeline]) diff --git a/services/apps/src/apps/service/apps/agave.clj b/services/apps/src/apps/service/apps/agave.clj index 9ca32d638..deb16f740 100644 --- a/services/apps/src/apps/service/apps/agave.clj +++ b/services/apps/src/apps/service/apps/agave.clj @@ -76,6 +76,10 @@ (when-not (util/uuid? app-id) (.getAppToolListing agave app-id))) + (getAppInputIds [_ app-id] + (when-not (util/uuid? app-id) + (.getAppInputIds agave app-id))) + (formatPipelineTasks [_ pipeline] (pipelines/format-pipeline-tasks agave pipeline)) diff --git a/services/apps/src/apps/service/apps/combined.clj b/services/apps/src/apps/service/apps/combined.clj index 6532b9ad3..9ec232f30 100644 --- a/services/apps/src/apps/service/apps/combined.clj +++ b/services/apps/src/apps/service/apps/combined.clj @@ -109,6 +109,11 @@ (getAppUi [_ app-id] (.getAppUi (util/get-apps-client clients) app-id)) + (getAppInputIds [_ app-id] + (->> (map #(.getAppInputIds % app-id) clients) + (remove nil?) + (first))) + (addPipeline [self pipeline] (.formatPipelineTasks self (.addPipeline (util/get-apps-client clients) pipeline))) diff --git a/services/apps/src/apps/service/apps/de.clj b/services/apps/src/apps/service/apps/de.clj index 38156e3d5..9c3538556 100644 --- a/services/apps/src/apps/service/apps/de.clj +++ b/services/apps/src/apps/service/apps/de.clj @@ -121,6 +121,10 @@ (when (util/uuid? app-id) (edit/get-app-ui user app-id))) + (getAppInputIds [_ app-id] + (when (util/uuid? app-id) + (listings/get-app-input-ids (uuidify app-id)))) + (addPipeline [_ pipeline] (pipeline-edit/add-pipeline user pipeline)) diff --git a/services/apps/src/apps/service/apps/de/listings.clj b/services/apps/src/apps/service/apps/de/listings.clj index dd6d55fe9..a4723273b 100644 --- a/services/apps/src/apps/service/apps/de/listings.clj +++ b/services/apps/src/apps/service/apps/de/listings.clj @@ -441,3 +441,10 @@ [{:keys [username]} app-id] (or (amp/get-category-id-for-app username app-id (workspace-favorites-app-category-index)) shared-with-me-id)) + +(defn get-app-input-ids + "Gets the list of parameter IDs corresponding to input files." 
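+  ;; yields IDs of the form "<step-id>_<param-id>", matching the keys used in a job's config map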
+ [app-id] + (->> (amp/get-app-parameters app-id) + (filter (comp amp/param-ds-input-types :type)) + (mapv #(str (:step_id %) "_" (:id %))))) diff --git a/services/apps/src/apps/service/apps/jobs/params.clj b/services/apps/src/apps/service/apps/jobs/params.clj index 7beb42db0..6752fb9c9 100644 --- a/services/apps/src/apps/service/apps/jobs/params.clj +++ b/services/apps/src/apps/service/apps/jobs/params.clj @@ -11,7 +11,7 @@ (let [submission (:submission job)] (when-not submission (throw+ {:type :clojure-commons.exception/not-found - :error "Job submission values could not be found."})) + :error (str "Job submission values could not be found for " (:id job))})) (cheshire/decode (.getValue submission) true))) (defn- load-mapped-params @@ -131,3 +131,7 @@ (update-in (assoc (.getAppJobView apps-client (:app-id job)) :debug (:debug submission false)) [:groups] (partial update-app-groups (:config submission))))) + +(defn get-job-config + [job] + (:config (get-job-submission job))) diff --git a/services/apps/src/apps/service/apps/jobs/sharing.clj b/services/apps/src/apps/service/apps/jobs/sharing.clj index c9e04c6bc..b2e0093e4 100644 --- a/services/apps/src/apps/service/apps/jobs/sharing.clj +++ b/services/apps/src/apps/service/apps/jobs/sharing.clj @@ -5,8 +5,10 @@ [apps.clients.iplant-groups :as iplant-groups] [apps.clients.notifications :as cn] [apps.persistence.jobs :as jp] + [apps.service.apps.jobs.params :as job-params] [apps.service.apps.jobs.permissions :as job-permissions] [apps.util.service :as service] + [clojure.string :as string] [clojure.tools.logging :as log] [clojure-commons.error-codes :as ce])) @@ -102,10 +104,34 @@ (catch ce/clj-http-error? {:keys [body]} (str "unable to share result folder: " (:error_code (service/parse-json body)))))) +(defn- share-input-file + [sharer sharee path] + (try+ + (data-info/share-path sharer path sharee "read") + nil + (catch ce/clj-http-error? {:keys [body]} + (str "unable to share input file, " path ": " (:error_code (service/parse-json body)))))) + (defn- process-child-jobs [f job-id] (first (remove nil? (map f (jp/list-child-jobs job-id))))) +(defn- list-job-inputs + [apps-client job] + (->> (mapv keyword (.getAppInputIds apps-client (:app-id job))) + (select-keys (job-params/get-job-config job)) + vals + (remove string/blank?))) + +(defn- process-job-inputs + [f apps-client job] + (first (remove nil? 
(map f (list-job-inputs apps-client job))))) + +(defn- share-child-job + [apps-client sharer sharee level job] + (or (process-job-inputs (partial share-input-file sharer sharee) apps-client job) + (iplant-groups/share-analysis (:id job) sharee level))) + (defn- share-job* [apps-client sharer sharee job-id job level] (or (verify-not-subjob job) @@ -114,7 +140,8 @@ (share-app-for-job apps-client sharer sharee job-id job) (share-output-folder sharer sharee job) (iplant-groups/share-analysis job-id sharee level) - (process-child-jobs #(iplant-groups/share-analysis (:id %) sharee level) job-id))) + (process-job-inputs (partial share-input-file sharer sharee) apps-client job) + (process-child-jobs (partial share-child-job apps-client sharer sharee level) job-id))) (defn- share-job [apps-client sharer sharee {job-id :analysis_id level :permission}] diff --git a/services/terrain/src/terrain/util/jwt.clj b/services/terrain/src/terrain/util/jwt.clj index 1525301f4..e035e514d 100644 --- a/services/terrain/src/terrain/util/jwt.clj +++ b/services/terrain/src/terrain/util/jwt.clj @@ -64,8 +64,8 @@ (defn validate-group-membership [handler allowed-groups-fn] (fn [request] - (let [allowed-groups (allowed-groups-fn) - actual-groups (get-in request [:jwt-claims :org.iplantc.de:entitlement] [])] + (let [allowed-groups (log/spy :warn (allowed-groups-fn)) + actual-groups (log/spy :warn (get-in request [:jwt-claims :org.iplantc.de:entitlement] []))] (if (some (partial contains? (set allowed-groups)) actual-groups) (handler request) (resp/forbidden "You are not in one of the admin groups."))))) From 342b696eb0ce0127bf6aa6d2d126e392d1782dba Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 16 Mar 2016 14:59:16 -0700 Subject: [PATCH 175/183] CORE-7565: undo an unintentional commit --- services/terrain/src/terrain/util/jwt.clj | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/terrain/src/terrain/util/jwt.clj b/services/terrain/src/terrain/util/jwt.clj index e035e514d..1525301f4 100644 --- a/services/terrain/src/terrain/util/jwt.clj +++ b/services/terrain/src/terrain/util/jwt.clj @@ -64,8 +64,8 @@ (defn validate-group-membership [handler allowed-groups-fn] (fn [request] - (let [allowed-groups (log/spy :warn (allowed-groups-fn)) - actual-groups (log/spy :warn (get-in request [:jwt-claims :org.iplantc.de:entitlement] []))] + (let [allowed-groups (allowed-groups-fn) + actual-groups (get-in request [:jwt-claims :org.iplantc.de:entitlement] [])] (if (some (partial contains? (set allowed-groups)) actual-groups) (handler request) (resp/forbidden "You are not in one of the admin groups."))))) From cf107521669637eb1f6aab5d8243b798665354d0 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 16 Mar 2016 15:06:15 -0700 Subject: [PATCH 176/183] Add placeholder testing framework code to clockwork. 
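This wires in the test2junit plugin alongside a trivially-passing
placeholder test, so the build pipeline has a real test target to run.
Assuming the plugin's documented entry point, CI can invoke it as:

    lein test2junit

Going by the paths added to .gitignore below, the plugin drops its
JUnit XML output under test2junit and generates a build.xml for the
optional Ant-based HTML report, so both are ignored.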
--- services/clockwork/.gitignore | 2 ++ services/clockwork/project.clj | 1 + services/clockwork/test/clockwork/core_test.clj | 6 +++--- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/services/clockwork/.gitignore b/services/clockwork/.gitignore index 11b4b7d26..85bdc7bc9 100644 --- a/services/clockwork/.gitignore +++ b/services/clockwork/.gitignore @@ -15,3 +15,5 @@ pom.xml .classpath .settings /bin +build.xml +test2junit diff --git a/services/clockwork/project.clj b/services/clockwork/project.clj index dffb8364a..c4ba4c6f9 100644 --- a/services/clockwork/project.clj +++ b/services/clockwork/project.clj @@ -42,4 +42,5 @@ :profiles {:dev {:resource-paths ["resources/test"]} :uberjar {:aot :all}} :main ^:skip-aot clockwork.core + :plugins [[test2junit "1.1.3"]] :uberjar-exclusions [#"BCKEY.SF"]) diff --git a/services/clockwork/test/clockwork/core_test.clj b/services/clockwork/test/clockwork/core_test.clj index 7f0f894c8..1f3a7e6ab 100644 --- a/services/clockwork/test/clockwork/core_test.clj +++ b/services/clockwork/test/clockwork/core_test.clj @@ -2,6 +2,6 @@ (:use clojure.test clockwork.core)) -(deftest a-test - (testing "FIXME, I fail." - (is (= 0 1)))) \ No newline at end of file +(deftest silly-test + (testing "This test is totally fake, but it'll pass!" + (is (= 1 1)))) From 48fa472d0a2acf458c20cb9bee7719d22e4ae0ea Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Wed, 16 Mar 2016 15:12:17 -0700 Subject: [PATCH 177/183] Add placeholder testing framework code to data-info. --- services/data-info/.gitignore | 2 ++ services/data-info/project.clj | 3 ++- services/data-info/test/data_info/core_test.clj | 7 +++++++ 3 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 services/data-info/test/data_info/core_test.clj diff --git a/services/data-info/.gitignore b/services/data-info/.gitignore index 016827c2b..8bb7fb842 100644 --- a/services/data-info/.gitignore +++ b/services/data-info/.gitignore @@ -22,3 +22,5 @@ .env .vagrant/ *.swp +build.xml +test2junit diff --git a/services/data-info/project.clj b/services/data-info/project.clj index 35a7b4ce1..a7b29073c 100644 --- a/services/data-info/project.clj +++ b/services/data-info/project.clj @@ -41,7 +41,8 @@ [org.iplantc/kameleon "5.2.5.0"] [org.iplantc/service-logging "5.2.5.0"]] :plugins [[lein-ring "0.9.6"] - [swank-clojure "1.4.2"]] + [swank-clojure "1.4.2"] + [test2junit "1.1.3"]] :profiles {:dev {:resource-paths ["conf/test"]} ;; compojure-api route macros should not be AOT compiled: ;; https://github.com/metosin/compojure-api/issues/135#issuecomment-121388539 diff --git a/services/data-info/test/data_info/core_test.clj b/services/data-info/test/data_info/core_test.clj new file mode 100644 index 000000000..778d1c552 --- /dev/null +++ b/services/data-info/test/data_info/core_test.clj @@ -0,0 +1,7 @@ +(ns data-info.core-test + (:use clojure.test + data-info.core)) + +(deftest silly-test + (testing "This test is totally fake, but it'll pass!" 
+ (is (= 1 1)))) From 61f8093ff53a45a04d21b6c6e8655ba4554e2818 Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 16 Mar 2016 15:47:55 -0700 Subject: [PATCH 178/183] CORE-7566: also unshare input files when unsharing an analysis --- .../src/apps/service/apps/jobs/sharing.clj | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/services/apps/src/apps/service/apps/jobs/sharing.clj b/services/apps/src/apps/service/apps/jobs/sharing.clj index b2e0093e4..3def22c67 100644 --- a/services/apps/src/apps/service/apps/jobs/sharing.clj +++ b/services/apps/src/apps/service/apps/jobs/sharing.clj @@ -173,16 +173,28 @@ (catch ce/clj-http-error? {:keys [body]} (str "unable to unshare result folder: " (:error_code (service/parse-json body)))))) -;; The apps client isn't used at this time, but it will be once we extend analysis sharing -;; to HPC apps. +(defn- unshare-input-file + [sharer sharee path] + (try+ + (data-info/unshare-path sharer path sharee) + nil + (catch ce/clj-http-error? {:keys [body]} + (str "unable to unshare input file: " (:error_code (service/parse-json body)))))) + +(defn- unshare-child-job + [apps-client sharer sharee job] + (or (process-job-inputs (partial unshare-input-file sharer sharee) apps-client job) + (iplant-groups/unshare-analysis (:id job) sharee))) + (defn- unshare-job* [apps-client sharer sharee job-id job] (or (verify-not-subjob job) (verify-accessible sharer job-id) (verify-support apps-client job-id) (unshare-output-folder sharer sharee job) + (process-job-inputs (partial unshare-input-file sharer sharee) apps-client job) (iplant-groups/unshare-analysis job-id sharee) - (process-child-jobs #(iplant-groups/unshare-analysis (:id %) sharee) job-id))) + (process-child-jobs (partial unshare-child-job apps-client sharer sharee) job-id))) (defn- unshare-job [apps-client sharer sharee job-id] From 363b38f3ef2bd9da1b17e36c7ed7aa90cbab2aae Mon Sep 17 00:00:00 2001 From: Dennis Roberts Date: Wed, 16 Mar 2016 17:07:54 -0700 Subject: [PATCH 179/183] CORE-7567 - CORE-7568: change the way that job permissions are validated for most endpoints --- services/apps/src/apps/service/apps/jobs.clj | 38 ++++++++----------- .../apps/service/apps/jobs/permissions.clj | 26 ++++++------- 2 files changed, 28 insertions(+), 36 deletions(-) diff --git a/services/apps/src/apps/service/apps/jobs.clj b/services/apps/src/apps/service/apps/jobs.clj index e01e76bdf..291a09b5e 100644 --- a/services/apps/src/apps/service/apps/jobs.clj +++ b/services/apps/src/apps/service/apps/jobs.clj @@ -113,45 +113,39 @@ (sync-incomplete-job-status apps-client job step) (sync-complete-job-status job))) -(defn validate-job-ownership - [username job-ids] - (let [unowned-ids (map :id (jp/list-unowned-jobs username job-ids))] - (when-not (empty? 
unowned-ids) - (service/not-owner "jobs" (string/join ", " unowned-ids))))) - (defn- validate-jobs-for-user - [username job-ids] + [user job-ids required-permission] (ju/validate-job-existence job-ids) - (validate-job-ownership username job-ids)) + (job-permissions/validate-job-permissions user required-permission job-ids)) (defn update-job - [{:keys [username]} job-id body] - (validate-jobs-for-user username [job-id]) + [user job-id body] + (validate-jobs-for-user user [job-id] "write") (jp/update-job job-id body) (->> (jp/get-job-by-id job-id) ((juxt :id :job-name :description)) (zipmap [:id :name :description]))) (defn delete-job - [{:keys [username]} job-id] - (validate-jobs-for-user username [job-id]) + [user job-id] + (validate-jobs-for-user user [job-id] "write") (jp/delete-jobs [job-id])) (defn delete-jobs - [{:keys [username]} job-ids] - (validate-jobs-for-user username job-ids) + [user job-ids] + (validate-jobs-for-user user job-ids "write") (jp/delete-jobs job-ids)) (defn get-parameter-values - [apps-client {:keys [username]} job-id] - (validate-jobs-for-user username [job-id]) + [apps-client user job-id] + (validate-jobs-for-user user [job-id] "read") (let [job (jp/get-job-by-id job-id)] {:app_id (:app-id job) :parameters (job-params/get-parameter-values apps-client job)})) (defn get-job-relaunch-info - [apps-client {:keys [username]} job-id] - (validate-jobs-for-user username [job-id]) + [apps-client user job-id] + (validate-jobs-for-user user [job-id] "read") (job-params/get-job-relaunch-info apps-client (jp/get-job-by-id job-id))) (defn- stop-job-steps @@ -162,8 +156,8 @@ (send-job-status-update apps-client job)) (defn stop-job - [apps-client {:keys [username] :as user} job-id] - (validate-jobs-for-user username [job-id]) + [apps-client user job-id] + (validate-jobs-for-user user [job-id] "write") (let [{:keys [status] :as job} (jp/get-job-by-id job-id)] (when (listings/is-completed? status) (service/bad-request (str "job, " job-id ", is already completed or canceled"))) @@ -176,8 +170,8 @@ (log/warn "unable to cancel the most recent step of job, " job-id))))) (defn list-job-steps - [{:keys [username]} job-id] - (validate-jobs-for-user username [job-id]) + [user job-id] + (validate-jobs-for-user user [job-id] "read") (listings/list-job-steps job-id)) (defn submit diff --git a/services/apps/src/apps/service/apps/jobs/permissions.clj b/services/apps/src/apps/service/apps/jobs/permissions.clj index 215d54745..df16abc66 100644 --- a/services/apps/src/apps/service/apps/jobs/permissions.clj +++ b/services/apps/src/apps/service/apps/jobs/permissions.clj @@ -9,13 +9,6 @@ [apps-client job-id] (every? 
#(.supportsJobSharing apps-client %) (jp/list-representative-job-steps job-id))) -(defn- validate-job-permission-level - [short-username perms required-level job-ids] - (doseq [job-id job-ids] - (let [user-perms (filter (comp (partial = short-username) :id :subject) (perms job-id))] - (when (iplant-groups/lacks-permission-level {job-id user-perms} required-level job-id) - (cxu/forbidden (str "insufficient privileges for analysis " job-id)))))) - (defn- validate-job-sharing-support [apps-client job-ids] (doseq [job-id job-ids] @@ -27,11 +20,15 @@ (when-let [subjob-ids (seq (map :id (filter :parent-id jobs)))] (cxu/bad-request (str "analysis sharing not supported for members of a batch job") :jobs subjob-ids))) -(defn- validate-jobs-for-permissions - [apps-client {short-username :shortUsername} perms required-level job-ids] - (ju/validate-job-existence job-ids) - (validate-job-permission-level short-username perms required-level job-ids) - (validate-job-sharing-support apps-client job-ids)) +(defn validate-job-permissions + ([{short-username :shortUsername :as user} required-level job-ids] + (let [perms (iplant-groups/load-analysis-permissions short-username job-ids)] + (validate-job-permissions user perms required-level job-ids))) + ([{short-username :shortUsername} perms required-level job-ids] + (doseq [job-id job-ids] + (let [user-perms (filter (comp (partial = short-username) :id :subject) (perms job-id))] + (when (iplant-groups/lacks-permission-level {job-id user-perms} required-level job-id) + (cxu/forbidden (str "insufficient privileges for analysis " job-id))))))) (defn- format-job-permission [short-username perms {:keys [id job-name]}] @@ -46,11 +43,12 @@ {:analyses (mapv (partial format-job-permission short-username perms) jobs)}) (defn list-job-permissions - [apps-client {:keys [username] :as user} job-ids] + [apps-client user job-ids] (ju/validate-job-existence job-ids) (transaction (let [jobs (jp/list-jobs-by-id job-ids)] (verify-not-subjobs jobs) (let [perms (iplant-groups/list-analysis-permissions job-ids)] - (validate-jobs-for-permissions apps-client user perms "read" job-ids) + (validate-job-permissions user perms "read" job-ids) + (validate-job-sharing-support apps-client job-ids) (format-job-permission-listing user perms (jp/list-jobs-by-id job-ids)))))) From dcfc7252fd9c9c163516d333b09f7491650335fd Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Thu, 17 Mar 2016 11:40:02 -0700 Subject: [PATCH 180/183] CORE-7575 Implement contextual click for analysis share notification. 
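The notification callback and its converter previously copied the
payload into the message context only for job_status_change actions;
a share notification fell through to the unhandled-action branch, so
clicking it had no effect. Both now treat "share" the same way, which
is what enables the contextual click on analysis-sharing
notifications. For orientation, the only field this code inspects is
the action inside the decoded payload; the surrounding structure here
is illustrative, not a schema:

    {
      "payload": {
        "action": "share",
        ...
      }
    }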
--- .../de/client/services/callbacks/NotificationCallback.java | 2 +- .../services/converters/NotificationCallbackConverter.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/callbacks/NotificationCallback.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/callbacks/NotificationCallback.java index 3d4563833..434b44e78 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/callbacks/NotificationCallback.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/callbacks/NotificationCallback.java @@ -63,7 +63,7 @@ public void onSuccess(String result) { PayloadAnalysis.class, payload).as(); String analysisAction = analysisPayload.getAction(); - if ("job_status_change".equals(analysisAction)) { + if ("job_status_change".equals(analysisAction) || "share".equals(analysisAction)) { msg.setContext(payload.getPayload()); } else { GWT.log("Unhandled Analysis action type!!"); diff --git a/ui/de-lib/src/main/java/org/iplantc/de/client/services/converters/NotificationCallbackConverter.java b/ui/de-lib/src/main/java/org/iplantc/de/client/services/converters/NotificationCallbackConverter.java index 5c90bc3da..1a05352ea 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/client/services/converters/NotificationCallbackConverter.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/client/services/converters/NotificationCallbackConverter.java @@ -60,7 +60,7 @@ protected List convertFrom(String object) { PayloadAnalysis.class, payload).as(); String analysisAction = analysisPayload.getAction(); - if ("job_status_change".equals(analysisAction)) { + if ("job_status_change".equals(analysisAction) || "share".equals(analysisAction)) { msg.setContext(payload.getPayload()); } else { GWT.log("Unhandled Analysis action type!!"); From 7ced8c92187dcf81da479d06a20303e0625a1692 Mon Sep 17 00:00:00 2001 From: Sriram Srinivasan Date: Thu, 17 Mar 2016 11:41:01 -0700 Subject: [PATCH 181/183] refactor variable name. 
--- .../de/analysis/client/views/AnalysesToolBarImpl.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java index f8a24f8b0..8dfd5e571 100644 --- a/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java +++ b/ui/de-lib/src/main/java/org/iplantc/de/analysis/client/views/AnalysesToolBarImpl.java @@ -181,7 +181,7 @@ public void onSelectionChanged(SelectionChangedEvent event) { final boolean canCancelSelection = canCancelSelection(currentSelection); final boolean canDeleteSelection = canDeleteSelection(currentSelection); boolean isOwner = isOwner(currentSelection); - boolean isShare = isSharable(currentSelection); + boolean can_share = isSharable(currentSelection); boolean goToFolderEnabled, viewParamsEnabled, relaunchEnabled, cancelEnabled, deleteEnabled; boolean renameEnabled, updateCommentsEnabled, shareEnabled; @@ -207,7 +207,7 @@ public void onSelectionChanged(SelectionChangedEvent event) { renameEnabled = isOwner; updateCommentsEnabled = isOwner; - shareEnabled = isOwner && isShare; + shareEnabled = isOwner && can_share; break; default: @@ -217,7 +217,7 @@ public void onSelectionChanged(SelectionChangedEvent event) { relaunchEnabled = false; cancelEnabled = canCancelSelection && isOwner; deleteEnabled = canDeleteSelection && isOwner; - shareEnabled = isOwner && isShare; + shareEnabled = isOwner && can_share; renameEnabled = false; updateCommentsEnabled = false; } From ad02c5f06de4c962b096f53769c939482d1bf305 Mon Sep 17 00:00:00 2001 From: Ian McEwen Date: Thu, 24 Mar 2016 13:03:52 -0700 Subject: [PATCH 182/183] Dewey's irods port as passed to irods/init needs to be a string, apparently. --- services/dewey/src/dewey/core.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/dewey/src/dewey/core.clj b/services/dewey/src/dewey/core.clj index 284593db3..b3faffe5d 100644 --- a/services/dewey/src/dewey/core.clj +++ b/services/dewey/src/dewey/core.clj @@ -39,7 +39,7 @@ (defn- init-irods [] (irods/init (cfg/irods-host) - (cfg/irods-port) + (str (cfg/irods-port)) (cfg/irods-user) (cfg/irods-pass) (cfg/irods-home) From 28cc058afea2150201884d2a6e33b1248f0ea4ba Mon Sep 17 00:00:00 2001 From: Tony Edgin Date: Mon, 4 Apr 2016 11:12:37 -0700 Subject: [PATCH 183/183] iRODS AMQP broker can now be on separate host --- ansible/inventories/group_vars/all | 4 ++++ ansible/roles/util-cfg-service/templates/dewey.properties.j2 | 4 ++-- .../roles/util-cfg-service/templates/info-typer.properties.j2 | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ansible/inventories/group_vars/all b/ansible/inventories/group_vars/all index 788d2f776..e2a340f68 100644 --- a/ansible/inventories/group_vars/all +++ b/ansible/inventories/group_vars/all @@ -394,6 +394,10 @@ irods: admins: rodsadmin data_curators_group: data-curators bad_chars: \u0060\u0027\u000A\u0009 + amqp_broker: + host: "{{ groups['amqp-brokers'][0] }}" + port: + # Jex does not actually have a container. The container name is how most syslog entries # are identified. See rsyslog-config role. 
diff --git a/ansible/roles/util-cfg-service/templates/dewey.properties.j2 b/ansible/roles/util-cfg-service/templates/dewey.properties.j2 index bea2a487d..fe891fab5 100644 --- a/ansible/roles/util-cfg-service/templates/dewey.properties.j2 +++ b/ansible/roles/util-cfg-service/templates/dewey.properties.j2 @@ -1,7 +1,7 @@ dewey.environment-name = {{ environment_name }} -dewey.amqp.host = {{ amqp_broker.host }} -dewey.amqp.port = {{ amqp_broker.port }} +dewey.amqp.host = {{ irods.amqp_broker.host }} +dewey.amqp.port = {{ irods.amqp_broker.port }} dewey.amqp.user = {{ amqp_user }} dewey.amqp.password = {{ amqp_password }} dewey.amqp.exchange.name = {{ amqp_irods_exchange }} diff --git a/ansible/roles/util-cfg-service/templates/info-typer.properties.j2 b/ansible/roles/util-cfg-service/templates/info-typer.properties.j2 index 495bbd961..5c0d8ed10 100644 --- a/ansible/roles/util-cfg-service/templates/info-typer.properties.j2 +++ b/ansible/roles/util-cfg-service/templates/info-typer.properties.j2 @@ -16,8 +16,8 @@ info-typer.irods.retry-sleep = 1000 info-typer.irods.use-trash = true # AMQP settings -info-typer.amqp.host = {{ amqp_broker.host }} -info-typer.amqp.port = {{ amqp_broker.port }} +info-typer.amqp.host = {{ irods.amqp_broker.host }} +info-typer.amqp.port = {{ irods.amqp_broker.port }} info-typer.amqp.user = {{ amqp_user }} info-typer.amqp.pass = {{ amqp_password }} info-typer.amqp.retry-sleep = 10000
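
With this change, dewey and info-typer take their broker settings from
the new irods.amqp_broker block instead of the DE-wide amqp_broker, so
the iRODS message broker can live on a different host. The host
defaults to the first member of the amqp-brokers inventory group, and
the port is left unset in group_vars, so it presumably has to be
supplied per deployment. A hypothetical override in a more specific
vars file (hostname and port are placeholders; note that redefining
the irods dict at a higher-precedence level replaces it wholesale
unless Ansible hash merging is enabled):

    irods:
      amqp_broker:
        host: rabbit-irods.example.org
        port: 5672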