main.go
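
// Artie Transfer entrypoint: loads config, sets up logging and metrics,
// resolves the destination, then runs the flush pool and the queue consumer.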
package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"
	"strconv"
	"sync"
	"time"

	"github.com/artie-labs/transfer/lib/config"
	"github.com/artie-labs/transfer/lib/config/constants"
	"github.com/artie-labs/transfer/lib/cryptography"
	"github.com/artie-labs/transfer/lib/destination"
	"github.com/artie-labs/transfer/lib/destination/utils"
	"github.com/artie-labs/transfer/lib/logger"
	"github.com/artie-labs/transfer/lib/telemetry/metrics"
	"github.com/artie-labs/transfer/models"
	"github.com/artie-labs/transfer/processes/consumer"
	"github.com/artie-labs/transfer/processes/pool"
)

func main() {
	// Parse args into settings
	settings, err := config.LoadSettings(os.Args, true)
	if err != nil {
		logger.Fatal("Failed to initialize config", slog.Any("err", err))
	}

	// Initialize default logger
	_logger, cleanUpHandlers := logger.NewLogger(settings.VerboseLogging, settings.Config.Reporting.Sentry)
	slog.SetDefault(_logger)
	defer cleanUpHandlers()
	// Optional randomized start-up delay to prevent all instances from starting
	// at the same time and causing a thundering herd.
	if value := os.Getenv("MAX_INIT_SLEEP_SECONDS"); value != "" {
		castedValue, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			logger.Fatal("Failed to parse sleep duration", slog.Any("err", err), slog.String("value", value))
		}

		randomSeconds, err := cryptography.RandomInt64n(castedValue)
		if err != nil {
			logger.Fatal("Failed to generate random number", slog.Any("err", err))
		}

		duration := time.Duration(randomSeconds) * time.Second
		slog.Info(fmt.Sprintf("Sleeping for %s before any data processing to prevent overwhelming Kafka", duration.String()))
		time.Sleep(duration)
	}
slog.Info("Config is loaded",
slog.Int("flushIntervalSeconds", settings.Config.FlushIntervalSeconds),
slog.Uint64("bufferPoolSize", uint64(settings.Config.BufferRows)),
slog.Int("flushPoolSizeKb", settings.Config.FlushSizeKb),
)
ctx := context.Background()
metricsClient := metrics.LoadExporter(settings.Config)
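
	// Resolve the destination: either a baseline output or a data warehouse.
	// Data warehouse destinations have their leftover temporary tables swept before consuming starts.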
	var dest destination.Baseline
	if utils.IsOutputBaseline(settings.Config) {
		dest, err = utils.LoadBaseline(settings.Config)
		if err != nil {
			logger.Fatal("Unable to load baseline destination", slog.Any("err", err))
		}
	} else {
		dwh, err := utils.LoadDataWarehouse(settings.Config, nil)
		if err != nil {
			logger.Fatal("Unable to load data warehouse destination", slog.Any("err", err))
		}

		if err = dwh.SweepTemporaryTables(ctx); err != nil {
			logger.Fatal("Failed to clean up temporary tables", slog.Any("err", err))
		}

		dest = dwh
	}
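
	// In-memory database that buffers incoming change events between flushes.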
	inMemDB := models.NewMemoryDB()

	var wg sync.WaitGroup
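
	// Flush pool: periodically flushes the in-memory database to the destination on the configured interval.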
	wg.Add(1)
	go func() {
		defer wg.Done()
		pool.StartPool(ctx, inMemDB, dest, metricsClient, time.Duration(settings.Config.FlushIntervalSeconds)*time.Second)
	}()
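
	// Consumer: reads from the configured message queue (Kafka or Pub/Sub) and feeds the in-memory database.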
	wg.Add(1)
	go func(ctx context.Context) {
		defer wg.Done()
		switch settings.Config.Queue {
		case constants.Kafka:
			consumer.StartConsumer(ctx, settings.Config, inMemDB, dest, metricsClient)
		case constants.PubSub:
			consumer.StartSubscriber(ctx, settings.Config, inMemDB, dest, metricsClient)
		default:
			logger.Fatal(fmt.Sprintf("Message queue: %s not supported", settings.Config.Queue))
		}
	}(ctx)

	wg.Wait()
}