-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.go
More file actions
119 lines (98 loc) · 2.78 KB
/
main.go
File metadata and controls
119 lines (98 loc) · 2.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
package main
import (
	"context"
	"flag"
	"log/slog"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/yourorg/loadbalancer/core"
	"github.com/yourorg/loadbalancer/observability"
	"github.com/yourorg/loadbalancer/routing"
)
// main wires up and runs the load balancer: it loads configuration, builds
// the backend pool and routing algorithm, optionally starts health checking
// and a metrics endpoint, and serves HTTP(S) until SIGINT/SIGTERM triggers a
// graceful shutdown. Any startup failure is fatal (exit code 1).
func main() {
	cfgPath := flag.String("config", "config.json", "path to config file")
	flag.Parse()

	logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))

	cfg, err := core.LoadConfig(*cfgPath)
	if err != nil {
		logger.Error("failed to load config", "error", err)
		os.Exit(1)
	}
	logger.Info("starting load balancer",
		"addr", cfg.ListenAddr,
		"algorithm", cfg.Algorithm,
		"backends", len(cfg.Backends),
	)

	// Build the backend pool; a malformed backend URL is fatal at startup.
	backends := make([]*core.Backend, 0, len(cfg.Backends))
	backendURLs := make([]string, 0, len(cfg.Backends))
	for _, bc := range cfg.Backends {
		b, err := core.NewBackend(bc.URL, bc.Weight)
		if err != nil {
			logger.Error("invalid backend URL", "url", bc.URL, "error", err)
			os.Exit(1)
		}
		backends = append(backends, b)
		backendURLs = append(backendURLs, bc.URL)
	}

	pool := core.NewPool(backends)
	collector := observability.NewCollector(backendURLs)
	sticky := routing.NewStickySessionStore(cfg.StickySession)
	algo := core.NewAlgorithm(cfg.Algorithm, pool)
	logger.Info("algorithm selected", "name", algo.Name())

	lb := core.NewLoadBalancer(core.Options{
		Pool:      pool,
		Algorithm: algo,
		Collector: collector,
		Sticky:    sticky,
		// Was cfg.Stickysession (typo) — the field is StickySession,
		// as used for the store construction above.
		StickyEnabled: cfg.StickySession.Enabled,
		Logger:        logger,
	})

	// ctx cancels background workers (the health checker) on shutdown.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if cfg.HealthCheck.Enabled {
		checker := observability.NewChecker(pool, cfg.HealthCheck, logger)
		go checker.Start(ctx)
	}

	mux := http.NewServeMux()
	if cfg.Metrics.Enabled {
		mux.Handle(cfg.Metrics.Path, collector.Handler())
		logger.Info("metrics endpoint enabled", "path", cfg.Metrics.Path)
	}
	mux.Handle("/", observability.Logger(logger, lb))

	// Server-side timeouts guard against slow or stuck clients.
	srv := &http.Server{
		Addr:         cfg.ListenAddr,
		Handler:      mux,
		ReadTimeout:  15 * time.Second,
		WriteTimeout: 30 * time.Second,
		IdleTimeout:  60 * time.Second,
	}

	// Graceful shutdown: on SIGINT/SIGTERM, stop background workers and
	// drain in-flight requests for up to 10s. The done channel makes main
	// wait for Shutdown to finish — ListenAndServe returns as soon as
	// Shutdown is *called*, not when draining completes, so exiting
	// immediately would cut active connections short.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	done := make(chan struct{})
	go func() {
		defer close(done)
		<-quit
		logger.Info("shutting down...")
		cancel()
		sdCtx, sdCancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer sdCancel()
		if err := srv.Shutdown(sdCtx); err != nil {
			logger.Error("shutdown error", "error", err)
		}
	}()

	if cfg.TLS.Enabled {
		logger.Info("TLS enabled", "cert", cfg.TLS.CertFile)
		err = srv.ListenAndServeTLS(cfg.TLS.CertFile, cfg.TLS.KeyFile)
	} else {
		err = srv.ListenAndServe()
	}
	if err != nil && err != http.ErrServerClosed {
		logger.Error("server error", "error", err)
		os.Exit(1)
	}

	// ErrServerClosed means Shutdown was initiated; wait for it to drain.
	<-done
	logger.Info("load balancer stopped")
}