source: code/trunk/server.go@825

Last change on this file since 825 was 818, checked in by yakumo.izuru, 2 years ago

Do some tidying

Signed-off-by: Izuru Yakumo <yakumo.izuru@…>

File size: 9.8 KB
package suika

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"mime"
	"net"
	"net/http"
	"runtime/debug"
	"sync"
	"sync/atomic"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"gopkg.in/irc.v3"
	"nhooyr.io/websocket"

	"marisa.chaotic.ninja/suika/config"
)

// TODO: make configurable
var (
	retryConnectMinDelay = time.Minute
	retryConnectMaxDelay = 10 * time.Minute
	retryConnectJitter = time.Minute
	connectTimeout = 15 * time.Second
	writeTimeout = 10 * time.Second
	upstreamMessageDelay = 2 * time.Second
	upstreamMessageBurst = 10
	backlogTimeout = 10 * time.Second
	handleDownstreamMessageTimeout = 10 * time.Second
	downstreamRegisterTimeout = 30 * time.Second
	chatHistoryLimit = 1000
	backlogLimit = 4000
)

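// Logger is the minimal logging interface used throughout the server:
// Printf always logs, Debugf only logs when debug output is enabled.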
type Logger interface {
	Printf(format string, v ...interface{})
	Debugf(format string, v ...interface{})
}

type logger struct {
	*log.Logger
	debug bool
}

func (l logger) Debugf(format string, v ...interface{}) {
	if !l.debug {
		return
	}
	l.Logger.Printf(format, v...)
}

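// NewLogger returns a Logger that writes to out with the standard log flags.
// Debugf output is only emitted when debug is true.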
func NewLogger(out io.Writer, debug bool) Logger {
	return logger{
		Logger: log.New(out, "", log.LstdFlags),
		debug: debug,
	}
}

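// prefixLogger wraps a Logger and prepends a fixed prefix to every message.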
type prefixLogger struct {
	logger Logger
	prefix string
}

var _ Logger = (*prefixLogger)(nil)

func (l *prefixLogger) Printf(format string, v ...interface{}) {
	v = append([]interface{}{l.prefix}, v...)
	l.logger.Printf("%v"+format, v...)
}

func (l *prefixLogger) Debugf(format string, v ...interface{}) {
	v = append([]interface{}{l.prefix}, v...)
	l.logger.Debugf("%v"+format, v...)
}

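// int64Gauge is a lock-free gauge backed by an atomically updated int64.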
type int64Gauge struct {
	v int64 // atomic
}

func (g *int64Gauge) Add(delta int64) {
	atomic.AddInt64(&g.v, delta)
}

func (g *int64Gauge) Value() int64 {
	return atomic.LoadInt64(&g.v)
}

func (g *int64Gauge) Float64() float64 {
	return float64(g.Value())
}

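// retryListener wraps a net.Listener and retries Accept with exponential
// backoff (starting at 5ms, capped at one second) on temporary errors
// instead of returning them to the caller.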
type retryListener struct {
	net.Listener
	Logger Logger

	delay time.Duration
}

func (ln *retryListener) Accept() (net.Conn, error) {
	for {
		conn, err := ln.Listener.Accept()
		if ne, ok := err.(net.Error); ok && ne.Temporary() {
			if ln.delay == 0 {
				ln.delay = 5 * time.Millisecond
			} else {
				ln.delay *= 2
			}
			if max := 1 * time.Second; ln.delay > max {
				ln.delay = max
			}
			if ln.Logger != nil {
				ln.Logger.Printf("accept error (retrying in %v): %v", ln.delay, err)
			}
			time.Sleep(ln.delay)
		} else {
			ln.delay = 0
			return conn, err
		}
	}
}

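// Config holds the server-wide configuration. It is stored atomically in
// Server and can be swapped at runtime with SetConfig.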
type Config struct {
	Hostname string
	Title string
	LogPath string
	HTTPOrigins []string
	AcceptProxyIPs config.IPSet
	MaxUserNetworks int
	MultiUpstream bool
	MOTD string
	UpstreamUserIPs []*net.IPNet
}

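// Server is the bouncer itself: it tracks users, listeners and connection
// metrics, and fans incoming IRC and WebSocket connections out to the
// per-user goroutines.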
type Server struct {
	Logger Logger
	Identd *Identd // can be nil
	MetricsRegistry prometheus.Registerer // can be nil

	config atomic.Value // *Config
	db Database
	stopWG sync.WaitGroup

	lock sync.Mutex
	listeners map[net.Listener]struct{}
	users map[string]*user

	metrics struct {
		downstreams int64Gauge
		upstreams int64Gauge

		upstreamOutMessagesTotal prometheus.Counter
		upstreamInMessagesTotal prometheus.Counter
		downstreamOutMessagesTotal prometheus.Counter
		downstreamInMessagesTotal prometheus.Counter

		upstreamConnectErrorsTotal prometheus.Counter
	}
}

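// NewServer creates a Server backed by db with a default configuration
// (hostname "localhost", no user network limit, multi-upstream enabled).
// Use SetConfig to override it and Start to launch the user bouncers.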
func NewServer(db Database) *Server {
	srv := &Server{
		Logger: NewLogger(log.Writer(), true),
		db: db,
		listeners: make(map[net.Listener]struct{}),
		users: make(map[string]*user),
	}
	srv.config.Store(&Config{
		Hostname: "localhost",
		MaxUserNetworks: -1,
		MultiUpstream: true,
	})
	return srv
}

func (s *Server) prefix() *irc.Prefix {
	return &irc.Prefix{Name: s.Config().Hostname}
}

func (s *Server) Config() *Config {
	return s.config.Load().(*Config)
}

func (s *Server) SetConfig(cfg *Config) {
	s.config.Store(cfg)
}

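// Start registers the Prometheus metrics and spawns a bouncer goroutine for
// every user found in the database.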
func (s *Server) Start() error {
	s.registerMetrics()

	users, err := s.db.ListUsers(context.TODO())
	if err != nil {
		return err
	}

	s.lock.Lock()
	for i := range users {
		s.addUserLocked(&users[i])
	}
	s.lock.Unlock()

	return nil
}

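// registerMetrics registers the suika_* gauges and counters with
// s.MetricsRegistry via promauto; a nil registry is tolerated (the metrics
// are then created but not registered anywhere).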
func (s *Server) registerMetrics() {
	factory := promauto.With(s.MetricsRegistry)

	factory.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "suika_users_active",
		Help: "Current number of active users",
	}, func() float64 {
		s.lock.Lock()
		n := len(s.users)
		s.lock.Unlock()
		return float64(n)
	})

	factory.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "suika_downstreams_active",
		Help: "Current number of downstream connections",
	}, s.metrics.downstreams.Float64)

	factory.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "suika_upstreams_active",
		Help: "Current number of upstream connections",
	}, s.metrics.upstreams.Float64)

	s.metrics.upstreamOutMessagesTotal = factory.NewCounter(prometheus.CounterOpts{
		Name: "suika_upstream_out_messages_total",
		Help: "Total number of outgoing messages sent to upstream servers",
	})

	s.metrics.upstreamInMessagesTotal = factory.NewCounter(prometheus.CounterOpts{
		Name: "suika_upstream_in_messages_total",
		Help: "Total number of incoming messages received from upstream servers",
	})

	s.metrics.downstreamOutMessagesTotal = factory.NewCounter(prometheus.CounterOpts{
		Name: "suika_downstream_out_messages_total",
		Help: "Total number of outgoing messages sent to downstream clients",
	})

	s.metrics.downstreamInMessagesTotal = factory.NewCounter(prometheus.CounterOpts{
		Name: "suika_downstream_in_messages_total",
		Help: "Total number of incoming messages received from downstream clients",
	})

	s.metrics.upstreamConnectErrorsTotal = factory.NewCounter(prometheus.CounterOpts{
		Name: "suika_upstream_connect_errors_total",
		Help: "Total number of upstream connection errors",
	})
}

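// Shutdown closes all listeners, asks every user goroutine to stop, waits
// for them to finish, then closes the database.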
func (s *Server) Shutdown() {
	s.lock.Lock()
	for ln := range s.listeners {
		if err := ln.Close(); err != nil {
			s.Logger.Printf("failed to stop listener: %v", err)
		}
	}
	for _, u := range s.users {
		u.events <- eventStop{}
	}
	s.lock.Unlock()

	s.stopWG.Wait()

	if err := s.db.Close(); err != nil {
		s.Logger.Printf("failed to close DB: %v", err)
	}
}

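// createUser stores a new user in the database and starts its bouncer. It
// fails if a user with the same username already exists.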
func (s *Server) createUser(ctx context.Context, user *User) (*user, error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	if _, ok := s.users[user.Username]; ok {
		return nil, fmt.Errorf("user %q already exists", user.Username)
	}

	err := s.db.StoreUser(ctx, user)
	if err != nil {
		return nil, fmt.Errorf("could not create user in db: %v", err)
	}

	return s.addUserLocked(user), nil
}

func (s *Server) forEachUser(f func(*user)) {
	s.lock.Lock()
	for _, u := range s.users {
		f(u)
	}
	s.lock.Unlock()
}

func (s *Server) getUser(name string) *user {
	s.lock.Lock()
	u := s.users[name]
	s.lock.Unlock()
	return u
}

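// addUserLocked starts the bouncer goroutine for user and registers it in
// s.users. The caller must hold s.lock.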
func (s *Server) addUserLocked(user *User) *user {
	s.Logger.Printf("starting bouncer for user %q", user.Username)
	u := newUser(s, user)
	s.users[u.Username] = u

	s.stopWG.Add(1)

	go func() {
		defer func() {
			if err := recover(); err != nil {
				s.Logger.Printf("panic serving user %q: %v\n%v", user.Username, err, debug.Stack())
			}

			s.lock.Lock()
			delete(s.users, u.Username)
			s.lock.Unlock()

			s.stopWG.Done()
		}()

		u.run()
	}()

	return u
}

var lastDownstreamID uint64 = 0

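// handle serves a single downstream IRC connection: it runs registration,
// then forwards messages to the owning user's event loop until the
// connection goes away.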
func (s *Server) handle(ic ircConn) {
	defer func() {
		if err := recover(); err != nil {
			s.Logger.Printf("panic serving downstream %q: %v\n%v", ic.RemoteAddr(), err, debug.Stack())
		}
	}()

	s.metrics.downstreams.Add(1)
	id := atomic.AddUint64(&lastDownstreamID, 1)
	dc := newDownstreamConn(s, ic, id)
	if err := dc.runUntilRegistered(); err != nil {
		if !errors.Is(err, io.EOF) {
			dc.logger.Printf("%v", err)
		}
	} else {
		dc.user.events <- eventDownstreamConnected{dc}
		if err := dc.readMessages(dc.user.events); err != nil {
			dc.logger.Printf("%v", err)
		}
		dc.user.events <- eventDownstreamDisconnected{dc}
	}
	dc.Close()
	s.metrics.downstreams.Add(-1)
}

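// Serve accepts IRC connections on ln until the listener is closed,
// retrying temporary accept errors via retryListener. Illustrative use
// (srv and the listen address are placeholders):
//
//	ln, err := net.Listen("tcp", ":6667")
//	if err != nil {
//		log.Fatal(err)
//	}
//	go srv.Serve(ln)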
func (s *Server) Serve(ln net.Listener) error {
	ln = &retryListener{
		Listener: ln,
		Logger: &prefixLogger{logger: s.Logger, prefix: fmt.Sprintf("listener %v: ", ln.Addr())},
	}

	s.lock.Lock()
	s.listeners[ln] = struct{}{}
	s.lock.Unlock()

	s.stopWG.Add(1)

	defer func() {
		s.lock.Lock()
		delete(s.listeners, ln)
		s.lock.Unlock()

		s.stopWG.Done()
	}()

	for {
		conn, err := ln.Accept()
		if isErrClosed(err) {
			return nil
		} else if err != nil {
			return fmt.Errorf("failed to accept connection: %v", err)
		}

		go s.handle(newNetIRCConn(conn))
	}
}

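// ServeHTTP upgrades an HTTP request to a WebSocket IRC connection. The
// Forwarded/X-Forwarded-* headers are only honoured when the request comes
// from a trusted proxy IP, to prevent remote-address spoofing.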
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	conn, err := websocket.Accept(w, req, &websocket.AcceptOptions{
		Subprotocols: []string{"text.ircv3.net"}, // non-compliant, fight me
		OriginPatterns: s.Config().HTTPOrigins,
	})
	if err != nil {
		s.Logger.Printf("failed to serve HTTP connection: %v", err)
		return
	}

	isProxy := false
	if host, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
		if ip := net.ParseIP(host); ip != nil {
			isProxy = s.Config().AcceptProxyIPs.Contains(ip)
		}
	}

	// Only trust the Forwarded header field if this is a trusted proxy IP
	// to prevent users from spoofing the remote address
	remoteAddr := req.RemoteAddr
	if isProxy {
		forwarded := parseForwarded(req.Header)
		if forwarded["for"] != "" {
			remoteAddr = forwarded["for"]
		}
	}

	s.handle(newWebsocketIRCConn(conn, remoteAddr))
}

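// parseForwarded extracts the proxy parameters from the Forwarded header,
// falling back to the legacy X-Forwarded-* headers when it is absent.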
func parseForwarded(h http.Header) map[string]string {
	forwarded := h.Get("Forwarded")
	if forwarded == "" {
		return map[string]string{
			"for": h.Get("X-Forwarded-For"),
			"proto": h.Get("X-Forwarded-Proto"),
			"host": h.Get("X-Forwarded-Host"),
		}
	}
	// Hack to easily parse header parameters
	_, params, _ := mime.ParseMediaType("hack; " + forwarded)
	return params
}

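// ServerStats is a snapshot of the server's activity counters, as returned
// by Stats.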
type ServerStats struct {
	Users int
	Downstreams int64
	Upstreams int64
}

func (s *Server) Stats() *ServerStats {
	var stats ServerStats
	s.lock.Lock()
	stats.Users = len(s.users)
	s.lock.Unlock()
	stats.Downstreams = s.metrics.downstreams.Value()
	stats.Upstreams = s.metrics.upstreams.Value()
	return &stats
}