// Copyright 2018 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cluster

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"math/rand"
	"net"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/oklog/ulid"
	"github.com/prometheus/client_golang/prometheus"
)

// ClusterPeer represents a single peer in a gossip cluster.
type ClusterPeer interface {
	// Name returns the unique identifier of this peer in the cluster.
	Name() string
	// Status returns a status string representing the peer state.
	Status() string
	// Peers returns the peer nodes in the cluster.
	Peers() []ClusterMember
}

// ClusterMember is the interface that represents a node peer in a cluster.
type ClusterMember interface {
	// Name returns the name of the node.
	Name() string
	// Address returns the IP address of the node.
	Address() string
}

// ClusterChannel supports state broadcasting across peers.
type ClusterChannel interface {
	Broadcast([]byte)
}

// Peer is a single peer in a gossip cluster.
type Peer struct {
	mlist    *memberlist.Memberlist
	delegate *delegate

	resolvedPeers []string

	mtx    sync.RWMutex
	states map[string]State
	stopc  chan struct{}
	readyc chan struct{}

	peerLock    sync.RWMutex
	peers       map[string]peer
	failedPeers []peer

	knownPeers    []string
	advertiseAddr string

	failedReconnectionsCounter prometheus.Counter
	reconnectionsCounter       prometheus.Counter
	failedRefreshCounter       prometheus.Counter
	refreshCounter             prometheus.Counter
	peerLeaveCounter           prometheus.Counter
	peerUpdateCounter          prometheus.Counter
	peerJoinCounter            prometheus.Counter

	logger *slog.Logger
}

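// Compile-time assertion (added for illustration): *Peer implements the
// ClusterPeer interface declared above via the Name, Status, and Peers
// methods defined later in this file.
var _ ClusterPeer = (*Peer)(nil)
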
// peer is an internal type used for bookkeeping. It holds the state of peers
// in the cluster.
type peer struct {
	status    PeerStatus
	leaveTime time.Time

	*memberlist.Node
}

// PeerStatus is the state that a peer is in.
type PeerStatus int

const (
	StatusNone PeerStatus = iota
	StatusAlive
	StatusFailed
)

func (s PeerStatus) String() string {
	switch s {
	case StatusNone:
		return "none"
	case StatusAlive:
		return "alive"
	case StatusFailed:
		return "failed"
	default:
		panic(fmt.Sprintf("unknown PeerStatus: %d", s))
	}
}

const (
	DefaultPushPullInterval  = 60 * time.Second
	DefaultGossipInterval    = 200 * time.Millisecond
	DefaultTCPTimeout        = 10 * time.Second
	DefaultProbeTimeout      = 500 * time.Millisecond
	DefaultProbeInterval     = 1 * time.Second
	DefaultReconnectInterval = 10 * time.Second
	DefaultReconnectTimeout  = 6 * time.Hour
	DefaultRefreshInterval   = 15 * time.Second
	MaxGossipPacketSize      = 1400
)

// Create builds a Peer from the given configuration. The returned peer is
// not yet part of the cluster; call Join to connect to the known peers.
func Create(
	l *slog.Logger,
	reg prometheus.Registerer,
	bindAddr string,
	advertiseAddr string,
	knownPeers []string,
	waitIfEmpty bool,
	pushPullInterval time.Duration,
	gossipInterval time.Duration,
	tcpTimeout time.Duration,
	probeTimeout time.Duration,
	probeInterval time.Duration,
	tlsTransportConfig *TLSTransportConfig,
	allowInsecureAdvertise bool,
	label string,
) (*Peer, error) {
	bindHost, bindPortStr, err := net.SplitHostPort(bindAddr)
	if err != nil {
		return nil, fmt.Errorf("invalid listen address: %w", err)
	}
	bindPort, err := strconv.Atoi(bindPortStr)
	if err != nil {
		return nil, fmt.Errorf("address %s: invalid port: %w", bindAddr, err)
	}

	var advertiseHost string
	var advertisePort int
	if advertiseAddr != "" {
		var advertisePortStr string
		advertiseHost, advertisePortStr, err = net.SplitHostPort(advertiseAddr)
		if err != nil {
			return nil, fmt.Errorf("invalid advertise address: %w", err)
		}
		advertisePort, err = strconv.Atoi(advertisePortStr)
		if err != nil {
			return nil, fmt.Errorf("address %s: invalid port: %w", advertiseAddr, err)
		}
	}

	resolvedPeers, err := resolvePeers(context.Background(), knownPeers, advertiseAddr, &net.Resolver{}, waitIfEmpty)
	if err != nil {
		return nil, fmt.Errorf("resolve peers: %w", err)
	}
	l.Debug("resolved peers to following addresses", "peers", strings.Join(resolvedPeers, ","))

	// Initial validation of user-specified advertise address.
	addr, err := calculateAdvertiseAddress(bindHost, advertiseHost, allowInsecureAdvertise)
	if err != nil {
		l.Warn("couldn't deduce an advertise address: " + err.Error())
	} else if hasNonlocal(resolvedPeers) && isUnroutable(addr.String()) {
		l.Warn("this node advertises itself on an unroutable address", "addr", addr.String())
		l.Warn("this node will be unreachable in the cluster")
		l.Warn("provide --cluster.advertise-address as a routable IP address or hostname")
	} else if isAny(bindAddr) && advertiseHost == "" {
		// memberlist doesn't advertise properly when the bind address is empty or unspecified.
		l.Info("setting advertise address explicitly", "addr", addr.String(), "port", bindPort)
		advertiseHost = addr.String()
		advertisePort = bindPort
	}

	// TODO(fabxc): generate human-readable but random names?
	name, err := ulid.New(ulid.Now(), rand.New(rand.NewSource(time.Now().UnixNano())))
	if err != nil {
		return nil, err
	}

	p := &Peer{
		states:        map[string]State{},
		stopc:         make(chan struct{}),
		readyc:        make(chan struct{}),
		logger:        l,
		peers:         map[string]peer{},
		resolvedPeers: resolvedPeers,
		knownPeers:    knownPeers,
	}

	p.register(reg, name.String())

	retransmit := len(knownPeers) / 2
	if retransmit < 3 {
		retransmit = 3
	}
	p.delegate = newDelegate(l, reg, p, retransmit)

	cfg := memberlist.DefaultLANConfig()
	cfg.Name = name.String()
	cfg.BindAddr = bindHost
	cfg.BindPort = bindPort
	cfg.Delegate = p.delegate
	cfg.Ping = p.delegate
	cfg.Alive = p.delegate
	cfg.Events = p.delegate
	cfg.GossipInterval = gossipInterval
	cfg.PushPullInterval = pushPullInterval
	cfg.TCPTimeout = tcpTimeout
	cfg.ProbeTimeout = probeTimeout
	cfg.ProbeInterval = probeInterval
	cfg.Logger = slog.NewLogLogger(l.Handler(), slog.LevelDebug)
	cfg.GossipNodes = retransmit
	cfg.UDPBufferSize = MaxGossipPacketSize
	cfg.Label = label

	if advertiseHost != "" {
		cfg.AdvertiseAddr = advertiseHost
		cfg.AdvertisePort = advertisePort
		p.setInitialFailed(resolvedPeers, fmt.Sprintf("%s:%d", advertiseHost, advertisePort))
	} else {
		p.setInitialFailed(resolvedPeers, bindAddr)
	}

	if tlsTransportConfig != nil {
		l.Info("using TLS for gossip")
		cfg.Transport, err = NewTLSTransport(context.Background(), l, reg, cfg.BindAddr, cfg.BindPort, tlsTransportConfig)
		if err != nil {
			return nil, fmt.Errorf("tls transport: %w", err)
		}
	}

	ml, err := memberlist.Create(cfg)
	if err != nil {
		return nil, fmt.Errorf("create memberlist: %w", err)
	}
	p.mlist = ml
	return p, nil
}

// Join attempts to join the cluster using the peers resolved in Create and
// starts the background tasks that reconnect to failed peers, drop them
// after reconnectTimeout, and periodically refresh the known peers.
func (p *Peer) Join(
	reconnectInterval time.Duration,
	reconnectTimeout time.Duration,
) error {
	n, err := p.mlist.Join(p.resolvedPeers)
	if err != nil {
		p.logger.Warn("failed to join cluster", "err", err)
		if reconnectInterval != 0 {
			p.logger.Info(fmt.Sprintf("will retry joining cluster every %v", reconnectInterval.String()))
		}
	} else {
		p.logger.Debug("joined cluster", "peers", n)
	}

	if reconnectInterval != 0 {
		go p.runPeriodicTask(
			reconnectInterval,
			p.reconnect,
		)
	}
	if reconnectTimeout != 0 {
		go p.runPeriodicTask(
			5*time.Minute,
			func() { p.removeFailedPeers(reconnectTimeout) },
		)
	}
	go p.runPeriodicTask(
		DefaultRefreshInterval,
		p.refresh,
	)

	return err
}

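// A minimal usage sketch (illustrative wiring, not part of this package's
// API; assumes imports of context, os, log/slog, and the prometheus client):
// create a peer, join the cluster, then let gossip settle before serving.
//
//	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
//	p, err := cluster.Create(
//		logger, prometheus.DefaultRegisterer,
//		"0.0.0.0:9094",                     // bindAddr
//		"",                                 // advertiseAddr (deduced when empty)
//		[]string{"am-1:9094", "am-2:9094"}, // knownPeers
//		true,                               // waitIfEmpty
//		cluster.DefaultPushPullInterval, cluster.DefaultGossipInterval,
//		cluster.DefaultTCPTimeout, cluster.DefaultProbeTimeout, cluster.DefaultProbeInterval,
//		nil,   // tlsTransportConfig
//		false, // allowInsecureAdvertise
//		"",    // label
//	)
//	if err != nil {
//		logger.Error("unable to initialize gossip mesh", "err", err)
//		os.Exit(1)
//	}
//	if err := p.Join(cluster.DefaultReconnectInterval, cluster.DefaultReconnectTimeout); err != nil {
//		logger.Warn("unable to join gossip mesh", "err", err) // non-fatal; reconnects continue
//	}
//	go p.Settle(context.Background(), 10*cluster.DefaultGossipInterval)
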
// All peers are initially added to the failed list. They will be removed from
// this list in peerJoin when making their initial connection.
func (p *Peer) setInitialFailed(peers []string, myAddr string) {
	if len(peers) == 0 {
		return
	}

	p.peerLock.Lock()
	defer p.peerLock.Unlock()

	now := time.Now()
	for _, peerAddr := range peers {
		if peerAddr == myAddr {
			// Don't add ourselves to the initially failing list,
			// we don't connect to ourselves.
			continue
		}
		host, port, err := net.SplitHostPort(peerAddr)
		if err != nil {
			continue
		}
		ip := net.ParseIP(host)
		if ip == nil {
			// Don't add textual addresses since memberlist only advertises
			// dotted decimal or IPv6 addresses.
			continue
		}
		portUint, err := strconv.ParseUint(port, 10, 16)
		if err != nil {
			continue
		}

		pr := peer{
			status:    StatusFailed,
			leaveTime: now,
			Node: &memberlist.Node{
				Addr: ip,
				Port: uint16(portUint),
			},
		}
		p.failedPeers = append(p.failedPeers, pr)
		p.peers[peerAddr] = pr
	}
}

func (p *Peer) register(reg prometheus.Registerer, name string) {
	peerInfo := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name:        "alertmanager_cluster_peer_info",
			Help:        "A metric with a constant '1' value labeled by peer name.",
			ConstLabels: prometheus.Labels{"peer": name},
		},
	)
	peerInfo.Set(1)
	clusterFailedPeers := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "alertmanager_cluster_failed_peers",
		Help: "Number indicating the current number of failed peers in the cluster.",
	}, func() float64 {
		p.peerLock.RLock()
		defer p.peerLock.RUnlock()

		return float64(len(p.failedPeers))
	})
	p.failedReconnectionsCounter = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_cluster_reconnections_failed_total",
		Help: "A counter of the number of failed cluster peer reconnection attempts.",
	})

	p.reconnectionsCounter = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_cluster_reconnections_total",
		Help: "A counter of the number of cluster peer reconnections.",
	})

	p.failedRefreshCounter = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_cluster_refresh_join_failed_total",
		Help: "A counter of the number of failed cluster peer join attempts via refresh.",
	})
	p.refreshCounter = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_cluster_refresh_join_total",
		Help: "A counter of the number of cluster peers joined via refresh.",
	})

	p.peerLeaveCounter = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_cluster_peers_left_total",
		Help: "A counter of the number of peers that have left.",
	})
	p.peerUpdateCounter = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_cluster_peers_update_total",
		Help: "A counter of the number of peers that have updated metadata.",
	})
	p.peerJoinCounter = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_cluster_peers_joined_total",
		Help: "A counter of the number of peers that have joined.",
	})

	reg.MustRegister(peerInfo, clusterFailedPeers, p.failedReconnectionsCounter, p.reconnectionsCounter,
		p.peerLeaveCounter, p.peerUpdateCounter, p.peerJoinCounter, p.refreshCounter, p.failedRefreshCounter)
}

func (p *Peer) runPeriodicTask(d time.Duration, f func()) {
	tick := time.NewTicker(d)
	defer tick.Stop()

	for {
		select {
		case <-p.stopc:
			return
		case <-tick.C:
			f()
		}
	}
}

func (p *Peer) removeFailedPeers(timeout time.Duration) {
	p.peerLock.Lock()
	defer p.peerLock.Unlock()

	now := time.Now()

	keep := make([]peer, 0, len(p.failedPeers))
	for _, pr := range p.failedPeers {
		if pr.leaveTime.Add(timeout).After(now) {
			keep = append(keep, pr)
		} else {
			p.logger.Debug("failed peer has timed out", "peer", pr.Node, "addr", pr.Address())
			delete(p.peers, pr.Name)
		}
	}

	p.failedPeers = keep
}

func (p *Peer) reconnect() {
	p.peerLock.RLock()
	failedPeers := p.failedPeers
	p.peerLock.RUnlock()

	logger := p.logger.With("msg", "reconnect")
	for _, pr := range failedPeers {
		// No need to do bookkeeping on failedPeers here. If a
		// reconnect is successful, they will be announced in
		// peerJoin().
		if _, err := p.mlist.Join([]string{pr.Address()}); err != nil {
			p.failedReconnectionsCounter.Inc()
			logger.Debug("failure", "peer", pr.Node, "addr", pr.Address(), "err", err)
		} else {
			p.reconnectionsCounter.Inc()
			logger.Debug("success", "peer", pr.Node, "addr", pr.Address())
		}
	}
}

func (p *Peer) refresh() {
	logger := p.logger.With("msg", "refresh")

	resolvedPeers, err := resolvePeers(context.Background(), p.knownPeers, p.advertiseAddr, &net.Resolver{}, false)
	if err != nil {
		logger.Debug(fmt.Sprintf("%v", p.knownPeers), "err", err)
		return
	}

	members := p.mlist.Members()
	for _, peer := range resolvedPeers {
		var isPeerFound bool
		for _, member := range members {
			if member.Address() == peer {
				isPeerFound = true
				break
			}
		}

		if !isPeerFound {
			if _, err := p.mlist.Join([]string{peer}); err != nil {
				p.failedRefreshCounter.Inc()
				logger.Warn("failure", "addr", peer, "err", err)
			} else {
				p.refreshCounter.Inc()
				logger.Debug("success", "addr", peer)
			}
		}
	}
}

func (p *Peer) peerJoin(n *memberlist.Node) {
	p.peerLock.Lock()
	defer p.peerLock.Unlock()

	var oldStatus PeerStatus
	pr, ok := p.peers[n.Address()]
	if !ok {
		oldStatus = StatusNone
		pr = peer{
			status: StatusAlive,
			Node:   n,
		}
	} else {
		oldStatus = pr.status
		pr.Node = n
		pr.status = StatusAlive
		pr.leaveTime = time.Time{}
	}

	p.peers[n.Address()] = pr
	p.peerJoinCounter.Inc()

	if oldStatus == StatusFailed {
		p.logger.Debug("peer rejoined", "peer", pr.Node)
		p.failedPeers = removeOldPeer(p.failedPeers, pr.Address())
	}
}

func (p *Peer) peerLeave(n *memberlist.Node) {
	p.peerLock.Lock()
	defer p.peerLock.Unlock()

	pr, ok := p.peers[n.Address()]
	if !ok {
		// Why are we receiving a leave notification from a node that
		// never joined?
		return
	}

	pr.status = StatusFailed
	pr.leaveTime = time.Now()
	p.failedPeers = append(p.failedPeers, pr)
	p.peers[n.Address()] = pr

	p.peerLeaveCounter.Inc()
	p.logger.Debug("peer left", "peer", pr.Node)
}

func (p *Peer) peerUpdate(n *memberlist.Node) {
	p.peerLock.Lock()
	defer p.peerLock.Unlock()

	pr, ok := p.peers[n.Address()]
	if !ok {
		// Why are we receiving an update from a node that never
		// joined?
		return
	}

	pr.Node = n
	p.peers[n.Address()] = pr

	p.peerUpdateCounter.Inc()
	p.logger.Debug("peer updated", "peer", pr.Node)
}

// AddState adds a new state that will be gossiped. It returns a channel to which
// broadcast messages for the state can be sent.
func (p *Peer) AddState(key string, s State, reg prometheus.Registerer) ClusterChannel {
	p.mtx.Lock()
	p.states[key] = s
	p.mtx.Unlock()

	send := func(b []byte) {
		p.delegate.bcast.QueueBroadcast(simpleBroadcast(b))
	}
	peers := func() []*memberlist.Node {
		nodes := p.mlist.Members()
		for i, n := range nodes {
			if n.String() == p.Self().Name {
				nodes = append(nodes[:i], nodes[i+1:]...)
				break
			}
		}
		return nodes
	}
	sendOversize := func(n *memberlist.Node, b []byte) error {
		return p.mlist.SendReliable(n, b)
	}
	return NewChannel(key, send, peers, sendOversize, p.logger, p.stopc, reg)
}

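// A usage sketch (setState is a hypothetical State implementation like the
// one sketched after the State interface below): register the state, then
// push serialized updates through the returned channel.
//
//	s := &setState{set: map[string]struct{}{}}
//	ch := p.AddState("ex", s, prometheus.DefaultRegisterer)
//	b, _ := s.MarshalBinary()
//	ch.Broadcast(b) // gossip the serialized state to peers
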
// Leave the cluster, waiting up to timeout.
func (p *Peer) Leave(timeout time.Duration) error {
	close(p.stopc)
	p.logger.Debug("leaving cluster")
	return p.mlist.Leave(timeout)
}

// Name returns the unique ID of this peer in the cluster.
func (p *Peer) Name() string {
	return p.mlist.LocalNode().Name
}

// ClusterSize returns the current number of alive members in the cluster.
func (p *Peer) ClusterSize() int {
	return p.mlist.NumMembers()
}

// Ready returns true once the gossip has settled.
func (p *Peer) Ready() bool {
	select {
	case <-p.readyc:
		return true
	default:
	}
	return false
}

// WaitReady blocks until Settle() has finished or the context is canceled.
func (p *Peer) WaitReady(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-p.readyc:
		return nil
	}
}

// Status returns a status string representing the peer state.
func (p *Peer) Status() string {
	if p.Ready() {
		return "ready"
	}

	return "settling"
}

// Info returns a JSON-serializable dump of cluster state.
// Useful for debugging.
func (p *Peer) Info() map[string]interface{} {
	p.mtx.RLock()
	defer p.mtx.RUnlock()

	return map[string]interface{}{
		"self":    p.mlist.LocalNode(),
		"members": p.mlist.Members(),
	}
}

// Self returns the node information about the peer itself.
func (p *Peer) Self() *memberlist.Node {
	return p.mlist.LocalNode()
}

// Member represents a member in the cluster.
type Member struct {
	node *memberlist.Node
}

// Name implements cluster.ClusterMember.
func (m Member) Name() string { return m.node.Name }

// Address implements cluster.ClusterMember.
func (m Member) Address() string { return m.node.Address() }

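// Compile-time assertion (added for illustration): Member implements the
// ClusterMember interface.
var _ ClusterMember = Member{}
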
// Peers returns the peers in the cluster.
func (p *Peer) Peers() []ClusterMember {
	peers := make([]ClusterMember, 0, len(p.mlist.Members()))
	for _, member := range p.mlist.Members() {
		peers = append(peers, Member{
			node: member,
		})
	}
	return peers
}

// Position returns the position of the peer in the cluster. Peers are sorted
// by name, so the position is stable for a given set of members and can be
// used to deterministically divide work between peers.
func (p *Peer) Position() int {
	all := p.mlist.Members()
	sort.Slice(all, func(i, j int) bool {
		return all[i].Name < all[j].Name
	})

	k := 0
	for _, n := range all {
		if n.Name == p.Self().Name {
			break
		}
		k++
	}
	return k
}

// Settle waits until the mesh is ready (and sets the appropriate internal state when it is).
// The idea is that we don't want to start "working" before we get a chance to know most of the alerts and/or silences.
// Inspired by https://github.com/apache/cassandra/blob/7a40abb6a5108688fb1b10c375bb751cbb782ea4/src/java/org/apache/cassandra/gms/Gossiper.java
// This is clearly not perfect or strictly correct, but it should prevent the Alertmanager from sending notifications before it is obviously ready.
// This is especially important for those that do not have persistent storage.
func (p *Peer) Settle(ctx context.Context, interval time.Duration) {
	const NumOkayRequired = 3
	p.logger.Info("Waiting for gossip to settle...", "interval", interval)
	start := time.Now()
	nPeers := 0
	nOkay := 0
	totalPolls := 0
	for {
		select {
		case <-ctx.Done():
			elapsed := time.Since(start)
			p.logger.Info("gossip not settled but continuing anyway", "polls", totalPolls, "elapsed", elapsed)
			close(p.readyc)
			return
		case <-time.After(interval):
		}
		elapsed := time.Since(start)
		n := len(p.Peers())
		if nOkay >= NumOkayRequired {
			p.logger.Info("gossip settled; proceeding", "elapsed", elapsed)
			break
		}
		if n == nPeers {
			nOkay++
			p.logger.Debug("gossip looks settled", "elapsed", elapsed)
		} else {
			nOkay = 0
			p.logger.Info("gossip not settled", "polls", totalPolls, "before", nPeers, "now", n, "elapsed", elapsed)
		}
		nPeers = n
		totalPolls++
	}
	close(p.readyc)
}

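// A wiring sketch (assumed caller-side code, not part of this package):
// Settle runs in its own goroutine while the caller blocks on WaitReady
// before doing real work.
//
//	go p.Settle(ctx, interval)
//	if err := p.WaitReady(ctx); err != nil {
//		return err // context canceled before gossip settled
//	}
//	// Gossip has settled (or settling was abandoned); start working.
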
// State is a piece of state that can be serialized and merged with other
// serialized state.
type State interface {
	// MarshalBinary serializes the underlying state.
	MarshalBinary() ([]byte, error)

	// Merge merges serialized state into the underlying state.
	Merge(b []byte) error
}

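// A minimal State implementation sketch: a set of strings merged by union.
// Illustrative only; real states (silences, notification logs) need proper
// encoding and conflict resolution beyond a plain union.
//
//	type setState struct {
//		mtx sync.Mutex
//		set map[string]struct{}
//	}
//
//	func (s *setState) MarshalBinary() ([]byte, error) {
//		s.mtx.Lock()
//		defer s.mtx.Unlock()
//		keys := make([]string, 0, len(s.set))
//		for k := range s.set {
//			keys = append(keys, k)
//		}
//		return []byte(strings.Join(keys, "\n")), nil
//	}
//
//	func (s *setState) Merge(b []byte) error {
//		s.mtx.Lock()
//		defer s.mtx.Unlock()
//		for _, k := range strings.Split(string(b), "\n") {
//			if k != "" {
//				s.set[k] = struct{}{}
//			}
//		}
//		return nil
//	}
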
// We use a simple broadcast implementation in which items are never invalidated by others.
type simpleBroadcast []byte

func (b simpleBroadcast) Message() []byte                       { return []byte(b) }
func (b simpleBroadcast) Invalidates(memberlist.Broadcast) bool { return false }
func (b simpleBroadcast) Finished()                             {}

func resolvePeers(ctx context.Context, peers []string, myAddress string, res *net.Resolver, waitIfEmpty bool) ([]string, error) {
	var resolvedPeers []string

	for _, peer := range peers {
		host, port, err := net.SplitHostPort(peer)
		if err != nil {
			return nil, fmt.Errorf("split host/port for peer %s: %w", peer, err)
		}

		retryCtx, cancel := context.WithCancel(ctx)
		defer cancel()

		ips, err := res.LookupIPAddr(ctx, host)
		if err != nil {
			// Assume direct address.
			resolvedPeers = append(resolvedPeers, peer)
			continue
		}

		if len(ips) == 0 {
			var lookupErrSpotted bool

			err := retry(2*time.Second, retryCtx.Done(), func() error {
				if lookupErrSpotted {
					// We need to invoke cancel in next run of retry when lookupErrSpotted to preserve LookupIPAddr error.
					cancel()
				}

				ips, err = res.LookupIPAddr(retryCtx, host)
				if err != nil {
					lookupErrSpotted = true
					return fmt.Errorf("IP Addr lookup for peer %s: %w", peer, err)
				}

				ips = removeMyAddr(ips, port, myAddress)
				if len(ips) == 0 {
					if !waitIfEmpty {
						return nil
					}
					return errors.New("empty IPAddr result. Retrying")
				}

				return nil
			})
			if err != nil {
				return nil, err
			}
		}

		for _, ip := range ips {
			resolvedPeers = append(resolvedPeers, net.JoinHostPort(ip.String(), port))
		}
	}

	return resolvedPeers, nil
}

func removeMyAddr(ips []net.IPAddr, targetPort, myAddr string) []net.IPAddr {
	var result []net.IPAddr

	for _, ip := range ips {
		if net.JoinHostPort(ip.String(), targetPort) == myAddr {
			continue
		}
		result = append(result, ip)
	}

	return result
}

func hasNonlocal(clusterPeers []string) bool {
	for _, peer := range clusterPeers {
		if host, _, err := net.SplitHostPort(peer); err == nil {
			peer = host
		}
		if ip := net.ParseIP(peer); ip != nil && !ip.IsLoopback() {
			return true
		} else if ip == nil && strings.ToLower(peer) != "localhost" {
			return true
		}
	}
	return false
}

func isUnroutable(addr string) bool {
	if host, _, err := net.SplitHostPort(addr); err == nil {
		addr = host
	}
	if ip := net.ParseIP(addr); ip != nil && (ip.IsUnspecified() || ip.IsLoopback()) {
		return true // typically 0.0.0.0 or localhost
	} else if ip == nil && strings.ToLower(addr) == "localhost" {
		return true
	}
	return false
}

func isAny(addr string) bool {
	if host, _, err := net.SplitHostPort(addr); err == nil {
		addr = host
	}
	return addr == "" || net.ParseIP(addr).IsUnspecified()
}

// retry executes f every interval until stopc is closed or f returns without error.
func retry(interval time.Duration, stopc <-chan struct{}, f func() error) error {
	tick := time.NewTicker(interval)
	defer tick.Stop()

	var err error
	for {
		if err = f(); err == nil {
			return nil
		}
		select {
		case <-stopc:
			return err
		case <-tick.C:
		}
	}
}

func removeOldPeer(old []peer, addr string) []peer {
	new := make([]peer, 0, len(old))
	for _, p := range old {
		if p.Address() != addr {
			new = append(new, p)
		}
	}

	return new
}