Remove a deadlock during shutdown.
If queries are still running when shutdown is initiated, they will finish _during_ the shutdown. In that case, they might request chunk eviction when unpinning their pinned chunks. Those requests can completely fill the evict request queue _after_ it has already been drained during storage shutdown. If that happens (which it will if _many_ queries are still running during shutdown), the affected queries get stuck while holding a fingerprint lock. Checkpointing can then no longer process that fingerprint (or any fingerprint sharing the same lock), and we are deadlocked.
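To illustrate the approach in isolation, here is a minimal, self-contained sketch of the drain-forever pattern this commit adopts. The names (`evictor`, `requests`, `stopping`, `stopped`) are hypothetical stand-ins for the corresponding `memorySeriesStorage` fields, not the actual code; the point is only that a goroutine keeps receiving from the bounded request channel after shutdown, so late requesters can never block on the send.

```go
package main

import (
	"fmt"
	"sync"
)

// evictor is a hypothetical stand-in for the storage's eviction handler.
type evictor struct {
	requests chan int      // bounded request queue, like evictRequests
	stopping chan struct{} // closed to initiate shutdown
	stopped  chan struct{} // closed once the eviction loop has exited
}

func (e *evictor) loop() {
	for {
		select {
		case req := <-e.requests:
			_ = req // normally: add to / remove from the evict list
		case <-e.stopping:
			// The fix: keep draining requests forever in a goroutine,
			// so queries that finish during shutdown never block when
			// they unpin chunks and send eviction requests.
			go func() {
				for range e.requests {
				}
			}()
			close(e.stopped)
			return
		}
	}
}

func main() {
	e := &evictor{
		requests: make(chan int, 2),
		stopping: make(chan struct{}),
		stopped:  make(chan struct{}),
	}
	go e.loop()

	close(e.stopping) // initiate shutdown
	<-e.stopped       // eviction loop has exited

	// Late requesters (queries still finishing) can still send and
	// therefore never get stuck holding their fingerprint locks.
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			e.requests <- i
		}(i)
	}
	wg.Wait()
	fmt.Println("no requester blocked after shutdown")
}
```

The previous code drained the queue only once, with a `default:` branch that exited as soon as the queue was momentarily empty; anything sent afterwards blocked forever, which is exactly the deadlock described above.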
parent edc91cbabb
commit 2c8fdcbc23
```diff
@@ -467,20 +467,19 @@ func (s *memorySeriesStorage) handleEvictList() {
 				s.maybeEvict()
 			}
 		case <-s.evictStopping:
-			// Drain evictRequests to not let requesters hang.
-			for {
-				select {
-				case <-s.evictRequests:
-					// Do nothing.
-				default:
-					ticker.Stop()
-					glog.Info("Chunk eviction stopped.")
-					close(s.evictStopped)
-					return
-				}
-			}
+			// Drain evictRequests forever in a goroutine to not let
+			// requesters hang.
+			go func() {
+				for {
+					<-s.evictRequests
+				}
+			}()
+			ticker.Stop()
+			glog.Info("Chunk eviction stopped.")
+			close(s.evictStopped)
+			return
 		}
 	}
 }
 
 // maybeEvict is a local helper method. Must only be called by handleEvictList.
```