// internal/monitor/scheduler.go

package monitor

import (
	"context"
	"log/slog"
	"sync"
	"time"

	"github.com/Fuwn/kaze/internal/config"
	"github.com/Fuwn/kaze/internal/storage"
)

// Scheduler manages and runs all monitors
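//
// Typical lifecycle (a sketch; cfg, store, and logger are supplied by
// the caller and are not constructed in this file):
//
//	sched, err := NewScheduler(cfg, store, logger)
//	if err != nil {
//		return err
//	}
//	sched.Start()
//	defer sched.Stop()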
type Scheduler struct {
	monitors []Monitor
	storage  *storage.Storage
	logger   *slog.Logger
	wg       sync.WaitGroup
	ctx      context.Context
	cancel   context.CancelFunc
}

// NewScheduler creates a new monitor scheduler
func NewScheduler(cfg *config.Config, store *storage.Storage, logger *slog.Logger) (*Scheduler, error) {
	ctx, cancel := context.WithCancel(context.Background())

	s := &Scheduler{
		storage: store,
		logger:  logger,
		ctx:     ctx,
		cancel:  cancel,
	}

	// Create monitors from configuration
	for _, group := range cfg.Groups {
		for _, monCfg := range group.Monitors {
			mon, err := New(monCfg)
			if err != nil {
				cancel()
				return nil, err
			}
			s.monitors = append(s.monitors, mon)
			logger.Info("registered monitor",
				"name", mon.Name(),
				"type", mon.Type(),
				"target", mon.Target(),
				"interval", mon.Interval())
		}
	}

	return s, nil
}

// Start begins running all monitors
func (s *Scheduler) Start() {
	s.logger.Info("starting scheduler", "monitors", len(s.monitors))

	for _, mon := range s.monitors {
		s.wg.Add(1)
		go s.runMonitor(mon)
	}

	// Start cleanup routine
	s.wg.Add(1)
	go s.runCleanup()
}

// Stop gracefully stops all monitors
func (s *Scheduler) Stop() {
	s.logger.Info("stopping scheduler")
	s.cancel()
	s.wg.Wait()
	s.logger.Info("scheduler stopped")
}

// runMonitor runs a single monitor in a loop
func (s *Scheduler) runMonitor(mon Monitor) {
	defer s.wg.Done()

	// Run immediately on start
	s.executeCheck(mon)

	ticker := time.NewTicker(mon.Interval())
	defer ticker.Stop()

	for {
		select {
		case <-s.ctx.Done():
			s.logger.Info("monitor stopped", "name", mon.Name())
			return
		case <-ticker.C:
			s.executeCheck(mon)
		}
	}
}

// executeCheck performs a single check and saves the result
func (s *Scheduler) executeCheck(mon Monitor) {
	// Create a context with a timeout for this check. Capping it at the
	// monitor's interval keeps a slow check (including its retries) from
	// piling up behind the next tick.
	checkCtx, cancel := context.WithTimeout(s.ctx, mon.Interval())
	defer cancel()

	var result *Result
	retries := mon.Retries()

	// Try the check, with retries if configured
	for attempt := 0; attempt <= retries; attempt++ {
		result = mon.Check(checkCtx)

		// If check succeeded (up or degraded), no need to retry
		if result.Status == StatusUp || result.Status == StatusDegraded {
			break
		}

		// If this wasn't the last attempt, log and retry
		if attempt < retries {
			s.logger.Debug("check failed, retrying",
				"name", mon.Name(),
				"attempt", attempt+1,
				"max_retries", retries,
				"error", result.Error)

			// Small delay before retry (500ms). Note that a bare break
			// inside a select only exits the select, never the retry
			// loop, so the context is re-checked after the wait.
			select {
			case <-checkCtx.Done():
			case <-time.After(500 * time.Millisecond):
			}
			if checkCtx.Err() != nil {
				// Context cancelled, abort retries
				break
			}
		}
	}

	// Log the result
	logAttrs := []any{
		"name", mon.Name(),
		"status", result.Status,
		"response_time", result.ResponseTime,
	}
	if result.StatusCode > 0 {
		logAttrs = append(logAttrs, "status_code", result.StatusCode)
	}
	if result.SSLDaysLeft > 0 {
		logAttrs = append(logAttrs, "ssl_days_left", result.SSLDaysLeft)
	}
	if result.Error != nil {
		logAttrs = append(logAttrs, "error", result.Error)
	}

	if result.Status == StatusUp {
		s.logger.Debug("check completed", logAttrs...)
	} else {
		s.logger.Warn("check completed", logAttrs...)
	}

	// Save to storage
	if err := s.storage.SaveCheckResult(s.ctx, result.ToCheckResult()); err != nil {
		s.logger.Error("failed to save check result",
			"name", mon.Name(),
			"error", err)
	}
}

// runCleanup periodically cleans up old data
func (s *Scheduler) runCleanup() {
	defer s.wg.Done()

	// Run cleanup daily (the first pass fires 24h after start; there is
	// no immediate cleanup on startup)
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()

	for {
		select {
		case <-s.ctx.Done():
			return
		case <-ticker.C:
			s.logger.Info("running database cleanup")
			if err := s.storage.Cleanup(s.ctx); err != nil {
				s.logger.Error("cleanup failed", "error", err)
			} else {
				s.logger.Info("cleanup completed")
			}
		}
	}
}

// GetMonitors returns all registered monitors
func (s *Scheduler) GetMonitors() []Monitor {
	return s.monitors
}

// RunCheck manually triggers a check for a specific monitor
func (s *Scheduler) RunCheck(name string) *Result {
	for _, mon := range s.monitors {
		if mon.Name() == name {
			ctx, cancel := context.WithTimeout(context.Background(), mon.Interval())
			defer cancel()
			result := mon.Check(ctx)

			// Save the result
			if err := s.storage.SaveCheckResult(context.Background(), result.ToCheckResult()); err != nil {
				s.logger.Error("failed to save manual check result",
					"name", mon.Name(),
					"error", err)
			}

			return result
		}
	}
	return nil
}
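
// Example wiring from a caller's main package (a sketch: config.Load,
// storage.New, and the signal handling shown here are assumptions about
// the surrounding program, not APIs confirmed by this file):
//
//	cfg, err := config.Load(configPath)
//	if err != nil {
//		log.Fatal(err)
//	}
//	store, err := storage.New(cfg.Database)
//	if err != nil {
//		log.Fatal(err)
//	}
//	sched, err := monitor.NewScheduler(cfg, store, slog.Default())
//	if err != nil {
//		log.Fatal(err)
//	}
//	sched.Start()
//
//	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
//	defer stop()
//	<-ctx.Done() // block until SIGINT, then shut down gracefully
//	sched.Stop()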