Fix panic in acceptance tests (#3592)

* Fix panic in acceptance tests

This commit attempts to address a panic that occurs in acceptance
tests if a server in the cluster fails to start.
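
For illustration, a minimal, self-contained sketch of the failure mode (using a hypothetical binary path; this is not the Alertmanager test harness itself): when exec.Cmd.Start fails, cmd.Process stays nil, so a later signal sent via cmd.Process.Pid panics unless it is guarded by a nil check like the one added here.

    // Minimal sketch of the panic being guarded against (hypothetical binary path).
    package main

    import (
    	"fmt"
    	"os/exec"
    	"syscall"
    )

    func main() {
    	cmd := exec.Command("/does/not/exist/alertmanager")
    	if err := cmd.Start(); err != nil {
    		fmt.Println("start failed:", err)
    	}

    	// Unguarded cleanup would panic here, because cmd.Process is nil
    	// when Start fails:
    	//   syscall.Kill(cmd.Process.Pid, syscall.SIGTERM)

    	// Guarded cleanup, mirroring the check added in this commit:
    	if cmd.Process != nil {
    		if err := syscall.Kill(cmd.Process.Pid, syscall.SIGTERM); err != nil {
    			fmt.Println("error sending SIGTERM:", err)
    		}
    	}
    }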

Signed-off-by: George Robinson <george.robinson@grafana.com>

* Remove started and check am.cmd.Process != nil

Signed-off-by: George Robinson <george.robinson@grafana.com>

---------

Signed-off-by: George Robinson <george.robinson@grafana.com>

@@ -169,17 +169,15 @@ func (t *AcceptanceTest) Run() {
 
 	for _, am := range t.amc.ams {
 		am.errc = errc
-		defer func(am *Alertmanager) {
-			am.Terminate()
-			am.cleanup()
-			t.Logf("stdout:\n%v", am.cmd.Stdout)
-			t.Logf("stderr:\n%v", am.cmd.Stderr)
-		}(am)
+		t.T.Cleanup(am.Terminate)
+		t.T.Cleanup(am.cleanup)
 	}
 
 	err := t.amc.Start()
 	if err != nil {
-		t.T.Fatal(err)
+		t.T.Log(err)
+		t.T.Fail()
+		return
 	}
 
 	// Set the reference time right before running the test actions to avoid
@@ -251,10 +249,10 @@ type Alertmanager struct {
 	apiAddr     string
 	clusterAddr string
 	clientV2    *apiclient.AlertmanagerAPI
-	cmd         *exec.Cmd
 	confFile    *os.File
 	dir         string
 
+	cmd  *exec.Cmd
 	errc chan<- error
 }
 
@@ -386,8 +384,12 @@ func (amc *AlertmanagerCluster) Terminate() {
 // data.
 func (am *Alertmanager) Terminate() {
 	am.t.Helper()
-	if err := syscall.Kill(am.cmd.Process.Pid, syscall.SIGTERM); err != nil {
-		am.t.Logf("Error sending SIGTERM to Alertmanager process: %v", err)
+	if am.cmd.Process != nil {
+		if err := syscall.Kill(am.cmd.Process.Pid, syscall.SIGTERM); err != nil {
+			am.t.Logf("Error sending SIGTERM to Alertmanager process: %v", err)
+		}
+		am.t.Logf("stdout:\n%v", am.cmd.Stdout)
+		am.t.Logf("stderr:\n%v", am.cmd.Stderr)
 	}
 }
 
@@ -401,8 +403,10 @@ func (amc *AlertmanagerCluster) Reload() {
 // Reload sends the reloading signal to the Alertmanager process.
 func (am *Alertmanager) Reload() {
 	am.t.Helper()
-	if err := syscall.Kill(am.cmd.Process.Pid, syscall.SIGHUP); err != nil {
-		am.t.Fatalf("Error sending SIGHUP to Alertmanager process: %v", err)
+	if am.cmd.Process != nil {
+		if err := syscall.Kill(am.cmd.Process.Pid, syscall.SIGHUP); err != nil {
+			am.t.Fatalf("Error sending SIGHUP to Alertmanager process: %v", err)
+		}
 	}
 }
 