From a64c126267955bca7ff223c0b2a3461c480e0146 Mon Sep 17 00:00:00 2001 From: Jeremy Kitchen Date: Sat, 16 Nov 2013 22:35:10 +0000 Subject: [PATCH] port to ubuntu --- README.md | 2 + manifests/params.pp | 47 ++++- templates/gmetad.conf.el6.erb | 6 +- templates/gmond.conf.ubuntu.erb | 361 ++++++++++++++++++++++++++++++++ 4 files changed, 405 insertions(+), 11 deletions(-) create mode 100644 templates/gmond.conf.ubuntu.erb diff --git a/README.md b/README.md index 88e7c40..14b915d 100644 --- a/README.md +++ b/README.md @@ -108,6 +108,8 @@ It has been tested on: * el5.x * el6.x +* ubuntu 12.04 +* ubuntu 13.04 `$::osfamily == RedHat` and EPEL packages ----------------------------------------- diff --git a/manifests/params.pp b/manifests/params.pp index 76be5d4..9fd0d83 100644 --- a/manifests/params.pp +++ b/manifests/params.pp @@ -12,19 +12,23 @@ # class ganglia::params { - $gmond_package_name = 'ganglia-gmond' - $gmond_service_name = 'gmond' - $gmetad_package_name = 'ganglia-gmetad' - $gmetad_service_name = 'gmetad' - - # paths are the same for el5.x & el6.x - $web_package_name = 'ganglia-web' - $web_php_config = '/etc/ganglia/conf.php' + # files are the same for ubuntu and el5/el6 $web_php_erb = 'ganglia/conf.php.el6.erb' case $::osfamily { redhat: { + $gmond_package_name = 'ganglia-gmond' + $gmond_service_name = 'gmond' + + $gmetad_package_name = 'ganglia-gmetad' + $gmetad_service_name = 'gmetad' + $gmetad_user = 'ganglia' + + # paths are the same for el5.x & el6.x + $web_package_name = 'ganglia-web' + $web_php_config = '/etc/ganglia/conf.php' + case $::operatingsystemmajrelease { # the epel packages change uid/gids + install paths between 5 & 6 5: { @@ -50,6 +54,33 @@ } } } + debian: { + case $::operatingsystem { + ubuntu: { + # I use ubuntu 12.04 and 13.04, have not tested others + $gmond_package_name = 'ganglia-monitor' + $gmond_service_name = 'ganglia-monitor' + + $gmetad_package_name = 'gmetad' + $gmetad_service_name = 'gmetad' + $gmetad_user = 'nobody' + + 
$web_package_name = 'ganglia-webfrontend' + $web_php_config = '/usr/share/ganglia-webfrontend/conf.php' + + $gmond_service_config = '/etc/ganglia/gmond.conf' + $gmond_service_erb = 'ganglia/gmond.conf.ubuntu.erb' + + $gmetad_service_config = '/etc/ganglia/gmetad.conf' + # it's the same file as el6 with only the default user comment changed + $gmetad_service_erb = 'ganglia/gmetad.conf.el6.erb' + } + default: { + fail("Module ${module_name} is not supported on osfamily/operatingsystem: ${::osfamily}/${::operatingsystem}") + } + } + } + default: { fail("Module ${module_name} is not supported on ${::operatingsystem}") } diff --git a/templates/gmetad.conf.el6.erb b/templates/gmetad.conf.el6.erb index bf13249..f78b036 100644 --- a/templates/gmetad.conf.el6.erb +++ b/templates/gmetad.conf.el6.erb @@ -99,9 +99,9 @@ gridname "<%= @gridname %>" # setuid off # #------------------------------------------------------------------------------- -# User gmetad will setuid to (defaults to "ganglia") -# default: "ganglia" -# setuid_username "ganglia" +# User gmetad will setuid to (defaults to "<%= @gmetad_user %>") +# default: "<%= @gmetad_user %>" +# setuid_username "<%= @gmetad_user %>" # #------------------------------------------------------------------------------- # The port gmetad will answer requests for XML diff --git a/templates/gmond.conf.ubuntu.erb b/templates/gmond.conf.ubuntu.erb new file mode 100644 index 0000000..2937386 --- /dev/null +++ b/templates/gmond.conf.ubuntu.erb @@ -0,0 +1,361 @@ +/* This configuration is as close to 2.5.x default behavior as possible + The values closely match ./gmond/metric.h definitions in 2.5.x */ +globals { + daemonize = yes + setuid = yes + user = ganglia + debug_level = 0 + max_udp_msg_len = 1472 + mute = no + deaf = no + host_dmax = 0 /*secs */ + cleanup_threshold = 300 /*secs */ + gexec = no + send_metadata_interval = 0 +} + +/* If a cluster attribute is specified, then all gmond hosts are wrapped inside + * of a tag. 
If you do not specify a cluster tag, then all will + * NOT be wrapped inside of a tag. */ +cluster { + name = "<%= @cluster_name %>" + owner = "<%= @cluster_owner %>" + latlong = "<%= @cluster_latlong %>" + url = "<%= @cluster_url %>" +} + +/* The host section describes attributes of the host, like the location */ +host { + location = "<%= @host_location %>" +} + +/* Feel free to specify as many udp_send_channels as you like. Gmond + used to only support having a single channel */ +<% @udp_send_channel.each do |channel| -%> +udp_send_channel { + <%- if channel['mcast_join'] then -%> + mcast_join = <%= channel['mcast_join'] %> + <%- end -%> + <%- if channel['host'] then -%> + host = <%= channel['host'] %> + <%- end -%> + <%- if channel['port'] then -%> + port = <%= channel['port'] %> + <%- end -%> + <%- if channel['ttl'] then -%> + ttl = <%= channel['ttl'] %> + <%- end -%> +} + +<% end -%> +/* You can specify as many udp_recv_channels as you like as well. */ +<% @udp_recv_channel.each do |channel| -%> +udp_recv_channel { + <%- if channel['mcast_join'] then -%> + mcast_join = <%= channel['mcast_join'] %> + <%- end -%> + <%- if channel['port'] then -%> + port = <%= channel['port'] %> + <%- end -%> + <%- if channel['bind'] then -%> + bind = <%= channel['bind'] %> + <%- end -%> +} + +<% end -%> +/* You can specify as many tcp_accept_channels as you like to share + an xml description of the state of the cluster */ +<% @tcp_accept_channel.each do |channel| -%> +tcp_accept_channel { + <%- if channel['port'] then -%> + port = <%= channel['port'] %> + <%- end -%> +} + +<% end -%> + +/* Each metrics module that is referenced by gmond must be specified and + loaded. If the module has been statically linked with gmond, it does not + require a load path. However all dynamically loadable modules must include + a load path. 
*/ +modules { + module { + name = "core_metrics" + } + module { + name = "cpu_module" + path = "/usr/lib/ganglia/modcpu.so" + } + module { + name = "disk_module" + path = "/usr/lib/ganglia/moddisk.so" + } + module { + name = "load_module" + path = "/usr/lib/ganglia/modload.so" + } + module { + name = "mem_module" + path = "/usr/lib/ganglia/modmem.so" + } + module { + name = "net_module" + path = "/usr/lib/ganglia/modnet.so" + } + module { + name = "proc_module" + path = "/usr/lib/ganglia/modproc.so" + } + module { + name = "sys_module" + path = "/usr/lib/ganglia/modsys.so" + } +} + +include ('/etc/ganglia/conf.d/*.conf') + + +/* The old internal 2.5.x metric array has been replaced by the following + collection_group directives. What follows is the default behavior for + collecting and sending metrics that is as close to 2.5.x behavior as + possible. */ + +/* This collection group will cause a heartbeat (or beacon) to be sent every + 20 seconds. In the heartbeat is the GMOND_STARTED data which expresses + the age of the running gmond. */ +collection_group { + collect_once = yes + time_threshold = 20 + metric { + name = "heartbeat" + } +} + +/* This collection group will send general info about this host every 1200 secs. + This information doesn't change between reboots and is only collected once. */ +collection_group { + collect_once = yes + time_threshold = 1200 + metric { + name = "cpu_num" + title = "CPU Count" + } + metric { + name = "cpu_speed" + title = "CPU Speed" + } + metric { + name = "mem_total" + title = "Memory Total" + } + /* Should this be here? Swap can be added/removed between reboots. 
*/ + metric { + name = "swap_total" + title = "Swap Space Total" + } + metric { + name = "boottime" + title = "Last Boot Time" + } + metric { + name = "machine_type" + title = "Machine Type" + } + metric { + name = "os_name" + title = "Operating System" + } + metric { + name = "os_release" + title = "Operating System Release" + } + metric { + name = "location" + title = "Location" + } +} + +/* This collection group will send the status of gexecd for this host every 300 secs */ +/* Unlike 2.5.x the default behavior is to report gexecd OFF. */ +collection_group { + collect_once = yes + time_threshold = 300 + metric { + name = "gexec" + title = "Gexec Status" + } +} + +/* This collection group will collect the CPU status info every 20 secs. + The time threshold is set to 90 seconds. In honesty, this time_threshold could be + set significantly higher to reduce unneccessary network chatter. */ +collection_group { + collect_every = 20 + time_threshold = 90 + /* CPU status */ + metric { + name = "cpu_user" + value_threshold = "1.0" + title = "CPU User" + } + metric { + name = "cpu_system" + value_threshold = "1.0" + title = "CPU System" + } + metric { + name = "cpu_idle" + value_threshold = "5.0" + title = "CPU Idle" + } + metric { + name = "cpu_nice" + value_threshold = "1.0" + title = "CPU Nice" + } + metric { + name = "cpu_aidle" + value_threshold = "5.0" + title = "CPU aidle" + } + metric { + name = "cpu_wio" + value_threshold = "1.0" + title = "CPU wio" + } + /* The next two metrics are optional if you want more detail... + ... since they are accounted for in cpu_system. 
+ metric { + name = "cpu_intr" + value_threshold = "1.0" + title = "CPU intr" + } + metric { + name = "cpu_sintr" + value_threshold = "1.0" + title = "CPU sintr" + } + */ +} + +collection_group { + collect_every = 20 + time_threshold = 90 + /* Load Averages */ + metric { + name = "load_one" + value_threshold = "1.0" + title = "One Minute Load Average" + } + metric { + name = "load_five" + value_threshold = "1.0" + title = "Five Minute Load Average" + } + metric { + name = "load_fifteen" + value_threshold = "1.0" + title = "Fifteen Minute Load Average" + } +} + +/* This group collects the number of running and total processes */ +collection_group { + collect_every = 80 + time_threshold = 950 + metric { + name = "proc_run" + value_threshold = "1.0" + title = "Total Running Processes" + } + metric { + name = "proc_total" + value_threshold = "1.0" + title = "Total Processes" + } +} + +/* This collection group grabs the volatile memory metrics every 40 secs and + sends them at least every 180 secs. This time_threshold can be increased + significantly to reduce unneeded network traffic. 
*/ +collection_group { + collect_every = 40 + time_threshold = 180 + metric { + name = "mem_free" + value_threshold = "1024.0" + title = "Free Memory" + } + metric { + name = "mem_shared" + value_threshold = "1024.0" + title = "Shared Memory" + } + metric { + name = "mem_buffers" + value_threshold = "1024.0" + title = "Memory Buffers" + } + metric { + name = "mem_cached" + value_threshold = "1024.0" + title = "Cached Memory" + } + metric { + name = "swap_free" + value_threshold = "1024.0" + title = "Free Swap Space" + } +} + +collection_group { + collect_every = 40 + time_threshold = 300 + metric { + name = "bytes_out" + value_threshold = 4096 + title = "Bytes Sent" + } + metric { + name = "bytes_in" + value_threshold = 4096 + title = "Bytes Received" + } + metric { + name = "pkts_in" + value_threshold = 256 + title = "Packets Received" + } + metric { + name = "pkts_out" + value_threshold = 256 + title = "Packets Sent" + } +} + +/* Different than 2.5.x default since the old config made no sense */ +collection_group { + collect_every = 1800 + time_threshold = 3600 + metric { + name = "disk_total" + value_threshold = 1.0 + title = "Total Disk Space" + } +} + +collection_group { + collect_every = 40 + time_threshold = 180 + metric { + name = "disk_free" + value_threshold = 1.0 + title = "Disk Space Available" + } + metric { + name = "part_max_used" + value_threshold = 1.0 + title = "Maximum Disk Space Used" + } +} +