From 4b74a444f19ca9dc902942b77bf1b3c9c2c81a34 Mon Sep 17 00:00:00 2001
From: Konstantin Suvorov
Date: Mon, 28 Aug 2017 11:56:49 +0300
Subject: [PATCH] Reconnect to MongoDB after multiple failures

Implements https://github.com/logstash-plugins/logstash-output-mongodb/pull/29
---
 lib/logstash/outputs/mongodb.rb | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/lib/logstash/outputs/mongodb.rb b/lib/logstash/outputs/mongodb.rb
index eb1a86e..12cb312 100644
--- a/lib/logstash/outputs/mongodb.rb
+++ b/lib/logstash/outputs/mongodb.rb
@@ -34,6 +34,9 @@ class LogStash::Outputs::Mongodb < LogStash::Outputs::Base
   # "_id" field in the event.
   config :generateId, :validate => :boolean, :default => false
 
+  # Number of insert attempts before reconnecting to MongoDB
+  config :retries_before_reconnect, :validate => :number, :default => 3, :required => false
+
   # Bulk insert flag, set to true to allow bulk insertion, else it will insert events one by one.
   config :bulk, :validate => :boolean, :default => false
 
@@ -72,6 +75,7 @@ def register
   end # def register
 
   def receive(event)
+    retries = 0
     begin
       # Our timestamp object now has a to_bson method, using it here
       # {}.merge(other) so we don't taint the event hash innards
@@ -111,6 +115,11 @@ def receive(event)
       # to fix the issue.
       else
         sleep @retry_delay
+        retries += 1
+        if @retries_before_reconnect == retries
+          register
+          retries = 0
+        end
         retry
       end
     end
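
Usage sketch: with this patch applied, the new option sits alongside the plugin's
existing settings in the pipeline's output block. The uri, database and collection
values below are placeholders; retry_delay is the plugin's existing delay between
insert attempts, referenced as @retry_delay in the diff above.

    output {
      mongodb {
        uri => "mongodb://localhost:27017"   # placeholder connection string
        database => "logstash"               # placeholder database name
        collection => "events"               # placeholder collection name
        retry_delay => 3                     # seconds to sleep after a failed insert
        retries_before_reconnect => 5        # call register again after 5 failed attempts (option default is 3)
      }
    }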