Mirror of https://github.com/prometheus-community/windows_exporter (synced 2024-12-27 00:42:06 +00:00)
187dbfc4ae
This change adds 4 new CPU-related metrics:

* processor_mperf_total
* processor_rtc_total
* processor_utility_total
* processor_privileged_utility_total

It also renames the existing processor_performance to processor_performance_total, since that metric was previously misunderstood and was unlikely to have been useful without the new metrics above.

The data sources for these are not particularly well understood, and the examples show that in some cases arbitrary scaling factors are required to make them useful. However, testing on hundreds of systems with a broad range of CPUs and operating systems, from 2012r2 through to 2019, has shown that we can use them to accurately display actual CPU frequencies and CPU utilisation as represented in taskmgr.

Things I don't particularly like and would like input on:

* I would have preferred to do the scaling of processor_mperf_total in the code, but there isn't an elegant way of doing this right now.
* Maybe processor_mperf_total should be called processor_mperformance_total.

See #787 for discussion.

Signed-off-by: Steffen Higel <higels@valvesoftware.com>
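As a rough illustration of what the in-code scaling mentioned above could look like, the following sketch multiplies the raw perflib value by a constant before exposing it. The names mperfScale and emitMPerf and the chosen factor are illustrative assumptions, not the collector's actual implementation:

package collector

import "github.com/prometheus/client_golang/prometheus"

// mperfScale is a purely illustrative scaling factor; the real factor is
// hardware dependent and is currently applied at the query/dashboard level.
const mperfScale = 1.0 / 1e6

// emitMPerf sketches how the scaling could be done in the collector itself,
// by scaling the raw perflib counter value before it is exposed.
func emitMPerf(ch chan<- prometheus.Metric, desc *prometheus.Desc, raw float64, core string) {
	ch <- prometheus.MustNewConstMetric(
		desc,
		prometheus.CounterValue,
		raw*mperfScale,
		core, // value for the "core" label
	)
}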
129 lines
2.9 KiB
Go
package collector

import (
	"reflect"
	"testing"

	perflibCollector "github.com/leoluk/perflib_exporter/collector"
	"github.com/leoluk/perflib_exporter/perflib"
)

type simple struct {
	ValA float64 `perflib:"Something"`
	ValB float64 `perflib:"Something Else"`
	ValC float64 `perflib:"Something Else,secondvalue"`
}

func TestUnmarshalPerflib(t *testing.T) {
	cases := []struct {
		name string
		obj  *perflib.PerfObject

		expectedOutput []simple
		expectError    bool
	}{
		{
			name:           "nil check",
			obj:            nil,
			expectedOutput: []simple{},
			expectError:    true,
		},
		{
			name: "Simple",
			obj: &perflib.PerfObject{
				Instances: []*perflib.PerfInstance{
					{
						Counters: []*perflib.PerfCounter{
							{
								Def: &perflib.PerfCounterDef{
									Name:        "Something",
									CounterType: perflibCollector.PERF_COUNTER_COUNTER,
								},
								Value: 123,
							},
						},
					},
				},
			},
			expectedOutput: []simple{{ValA: 123}},
			expectError:    false,
		},
		{
			name: "Multiple properties",
			obj: &perflib.PerfObject{
				Instances: []*perflib.PerfInstance{
					{
						Counters: []*perflib.PerfCounter{
							{
								Def: &perflib.PerfCounterDef{
									Name:        "Something",
									CounterType: perflibCollector.PERF_COUNTER_COUNTER,
								},
								Value: 123,
							},
							{
								Def: &perflib.PerfCounterDef{
									Name:           "Something Else",
									CounterType:    perflibCollector.PERF_COUNTER_COUNTER,
									HasSecondValue: true,
								},
								Value:       256,
								SecondValue: 222,
							},
						},
					},
				},
			},
			expectedOutput: []simple{{ValA: 123, ValB: 256, ValC: 222}},
			expectError:    false,
		},
		{
			name: "Multiple instances",
			obj: &perflib.PerfObject{
				Instances: []*perflib.PerfInstance{
					{
						Counters: []*perflib.PerfCounter{
							{
								Def: &perflib.PerfCounterDef{
									Name:        "Something",
									CounterType: perflibCollector.PERF_COUNTER_COUNTER,
								},
								Value: 321,
							},
						},
					},
					{
						Counters: []*perflib.PerfCounter{
							{
								Def: &perflib.PerfCounterDef{
									Name:        "Something",
									CounterType: perflibCollector.PERF_COUNTER_COUNTER,
								},
								Value: 231,
							},
						},
					},
				},
			},
			expectedOutput: []simple{{ValA: 321}, {ValA: 231}},
			expectError:    false,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			output := make([]simple, 0)
			err := unmarshalObject(c.obj, &output)
			if err != nil && !c.expectError {
				t.Errorf("Did not expect error, got %q", err)
			}
			if err == nil && c.expectError {
				t.Errorf("Expected an error, but got ok")
			}

			if err == nil && !reflect.DeepEqual(output, c.expectedOutput) {
				t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
			}
		})
	}
}
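For context on what the test above exercises, here is a minimal reflection-based sketch of how perflib:"Name" and perflib:"Name,secondvalue" struct tags can be matched against a single instance's counters. It is an illustration only, under the assumption that tagged fields are float64 as in the test struct; unmarshalInstanceSketch is a made-up name and this is not the repository's actual unmarshalObject implementation:

package collector

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/leoluk/perflib_exporter/perflib"
)

// unmarshalInstanceSketch is an illustrative, simplified take on tag-driven
// unmarshalling (not the real unmarshalObject): for each struct field tagged
// `perflib:"..."`, find the counter with the matching name and copy its Value
// (or SecondValue when the tag carries the "secondvalue" option).
func unmarshalInstanceSketch(inst *perflib.PerfInstance, dst interface{}) error {
	v := reflect.ValueOf(dst)
	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
		return fmt.Errorf("dst must be a pointer to a struct")
	}
	s := v.Elem()
	t := s.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("perflib")
		if tag == "" {
			continue
		}
		name, opts := tag, ""
		if idx := strings.Index(tag, ","); idx >= 0 {
			name, opts = tag[:idx], tag[idx+1:]
		}
		wantSecond := strings.Contains(opts, "secondvalue")
		for _, ctr := range inst.Counters {
			if ctr.Def == nil || ctr.Def.Name != name {
				continue
			}
			val := float64(ctr.Value)
			if wantSecond {
				val = float64(ctr.SecondValue)
			}
			s.Field(i).SetFloat(val) // assumes float64 fields, as in the test struct
		}
	}
	return nil
}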