Skip to content
This repository has been archived by the owner on Feb 19, 2024. It is now read-only.

Commit

Permalink
Merge pull request #51 from cabify/jvp/add-trickster-as-a-sidecar-to-prometheus
Browse files Browse the repository at this point in the history

Add trickster as a sidecar to prometheus
  • Loading branch information
jesusvazquez authored Feb 13, 2019
2 parents 1f0bacb + 96a0728 commit 1ca715d
Show file tree
Hide file tree
Showing 6 changed files with 183 additions and 23 deletions.
19 changes: 19 additions & 0 deletions apps/prometheus/config-map.tf
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
################################################################################
## Prometheus
################################################################################

resource "kubernetes_config_map" "prometheus" {
metadata {
name = "prometheus-configuration"
Expand Down Expand Up @@ -34,3 +38,18 @@ resource "k8s_manifest" "recordingrules" {
content = "${data.template_file.recordingrules.rendered}"
depends_on = ["kubernetes_config_map.prometheus"]
}

################################################################################
## Trickster
################################################################################

# Holds the rendered Trickster configuration. Mounted into the prometheus
# replication controller via the "trickster-config" volume so the sidecar
# reads it from /etc/trickster/trickster.conf.
resource "kubernetes_config_map" "trickster" {
  metadata {
    name      = "trickster-config"
    namespace = "${kubernetes_namespace.prometheus.metadata.0.name}"
  }

  data {
    # The key becomes the filename inside the mounted volume. Keys containing
    # dots must be quoted in HCL; unquoted, the parser does not treat
    # trickster.conf as a single literal key.
    "trickster.conf" = "${var.trickster_config}"
  }
}
45 changes: 45 additions & 0 deletions apps/prometheus/inputs.tf
Original file line number Diff line number Diff line change
Expand Up @@ -109,3 +109,48 @@ variable "prometheus_io_scrape" {
type = "string"
default = "true"
}

################################################################################
## Trickster
################################################################################

# Main proxy listen port of the Trickster sidecar. Also exposed by the
# trickster service and referenced by the container's --proxy-port argument.
variable "trickster_port" {
description = "Port where trickster is going to listen. We're using the 9092 to avoid collisions with prometheus port. This port is unallocated by the community to avoid conflicts with kafka but we're not going to have this problem inside our replication controller."
type = "string"
default = "9092"
}

# Port where Trickster serves its own /metrics endpoint; advertised to
# prometheus through the prometheus_io_port annotation on the service.
variable "trickster_metrics_port" {
description = "Port where trickster exposes its metrics."
type = "string"
default = "8082"
}

# Full contents of the Trickster config file; stored in the
# "trickster-config" config map under the key trickster.conf.
variable "trickster_config" {
# The file mounted and passed via --config is trickster.conf (not a YAML
# file), so the description must name the right artifact.
description = "Valid rendered trickster.conf config"
type = "string"
}

# Resource sizing for the trickster container; consumed by the resources
# block in the prometheus replication controller.
variable "trickster_memory_limit" {
description = "Memory limit for the kubernetes trickster pod"
type = "string"
default = "6Gi"
}

variable "trickster_memory_request" {
description = "Memory request (minimum) for the kubernetes trickster pod"
type = "string"
default = "1Gi"
}

variable "trickster_cpu_limit" {
description = "CPU limit for the trickster kubernetes pod"
type = "string"
default = "2"
}

variable "trickster_cpu_request" {
description = "CPU request for the trickster kubernetes pod"
type = "string"
default = "1"
}
8 changes: 8 additions & 0 deletions apps/prometheus/outputs.tf
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,11 @@ output "prometheus_service_port" {
output "prometheus_namespace" {
value = "${kubernetes_namespace.prometheus.metadata.0.name}"
}

# Exported so consumers (e.g. grafana datasources) can point at the cached
# Trickster endpoint instead of hitting prometheus directly.
output "trickster_service_name" {
value = "${kubernetes_service.trickster.metadata.0.name}"
}

output "trickster_service_port" {
value = "${var.trickster_port}"
}
56 changes: 56 additions & 0 deletions apps/prometheus/replication-controller.tf
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,20 @@ resource "kubernetes_replication_controller" "prometheus" {
}
}

# Projects the "trickster-config" config map (rendered trickster.conf)
# into the pod; mounted by the trickster container at /etc/trickster/.
volume {
name = "trickster-config"

config_map {
name = "trickster-config"
# 420 decimal == 0644 octal — owner rw, group/other read.
default_mode = 420
}
}

# Pod-local scratch space for trickster's boltdb cache; discarded when the
# pod goes away.
volume {
name = "trickster-boltdb-cache"
empty_dir {}
}

container {
image = "weaveworks/watch:master-5b2a6e5"
name = "config-watcher"
Expand Down Expand Up @@ -166,6 +180,48 @@ resource "kubernetes_replication_controller" "prometheus" {
timeout_seconds = "${var.readinessprobe_timeout_seconds}"
}
}

# Trickster sidecar: caching reverse proxy in front of prometheus.
container {
  image = "tricksterio/trickster:0.1.7"
  name  = "trickster"

  resources {
    requests {
      memory = "${var.trickster_memory_request}"
      cpu    = "${var.trickster_cpu_request}"
    }

    limits {
      memory = "${var.trickster_memory_limit}"
      cpu    = "${var.trickster_cpu_limit}"
    }
  }

  # Main proxy port, plus the metrics port scraped via the service
  # annotations in services.tf.
  port {
    container_port = "${var.trickster_port}"
  }

  port {
    container_port = "${var.trickster_metrics_port}"
  }

  args = [
    "--config=/etc/trickster/trickster.conf",

    # Derive the proxy port from var.trickster_port instead of hard-coding
    # 9092, so overriding the variable cannot drift from the flag.
    "--proxy-port=${var.trickster_port}",
  ]

  # Rendered config from the "trickster-config" config map.
  volume_mount {
    name       = "trickster-config"
    mount_path = "/etc/trickster/"
  }

  # Scratch boltdb cache backed by an empty_dir volume.
  volume_mount {
    name       = "trickster-boltdb-cache"
    mount_path = "/tmp/trickster/"
  }
}
}
}
}
23 changes: 0 additions & 23 deletions apps/prometheus/service.tf

This file was deleted.

55 changes: 55 additions & 0 deletions apps/prometheus/services.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# ClusterIP service fronting the prometheus pods managed by the
# replication controller; name, namespace and selector are all derived from
# it so they cannot drift apart.
resource "kubernetes_service" "prometheus" {
metadata {
annotations {
# Opt this service in/out of prometheus' own service discovery scraping.
prometheus_io_scrape = "${var.prometheus_io_scrape}"
}

name = "${kubernetes_replication_controller.prometheus.metadata.0.name}"
namespace = "${kubernetes_namespace.prometheus.metadata.0.name}"
}

spec {
selector {
app = "${kubernetes_replication_controller.prometheus.metadata.0.labels.app}"
}

# Pin each client to one backend pod so repeated queries hit the same
# prometheus instance.
session_affinity = "ClientIP"

port {
# NOTE(review): var.prometheus-port is hyphenated unlike the trickster_*
# variables — confirm it is declared with this exact name in inputs.tf.
port = "${var.prometheus-port}"
target_port = "${var.prometheus-port}"
}
}
}

# Service exposing the Trickster sidecar. It selects the same pods as the
# prometheus service (trickster runs in the prometheus replication
# controller) but routes to the sidecar's ports instead.
resource "kubernetes_service" "trickster" {
metadata {
annotations {
# Scrape trickster's own metrics endpoint, not the proxy port.
prometheus_io_scrape = "${var.prometheus_io_scrape}"
prometheus_io_port = "${var.trickster_metrics_port}"
}

name = "trickster-${kubernetes_replication_controller.prometheus.metadata.0.name}"
namespace = "${kubernetes_namespace.prometheus.metadata.0.name}"
}

spec {
selector {
app = "${kubernetes_replication_controller.prometheus.metadata.0.labels.app}"
}

session_affinity = "ClientIP"

# Caching proxy endpoint used by dashboard clients.
port {
name = "trickster-port"
port = "${var.trickster_port}"
target_port = "${var.trickster_port}"
}

# Trickster's own /metrics endpoint, referenced by the annotation above.
port {
name = "trickster-metrics-port"
port = "${var.trickster_metrics_port}"
target_port = "${var.trickster_metrics_port}"
}
}
}

0 comments on commit 1ca715d

Please sign in to comment.