mirror of
https://github.com/prometheus/prometheus
synced 2024-12-26 00:23:18 +00:00
f08abdb48b
irate is a rate function that only looks at the most recent two data points, and calculates a per-second value from that. This produces much more granular graphs for fast-moving data, and works sanely across many scrape intervals. It doesn't do so well for slowly moving data.
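As an illustrative sketch (not part of the commit message, using the same haproxy_exporter counter that the console below queries), the difference is easiest to see side by side:

    # Per-second rate from the last two samples in the 5m window (responsive, but spiky):
    sum(irate(haproxy_backend_http_responses_total{job='haproxy'}[5m]))

    # Per-second rate averaged over the whole 5m window (smoother, but slow to reflect sudden changes):
    sum(rate(haproxy_backend_http_responses_total{job='haproxy'}[5m]))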
59 lines
2.9 KiB
HTML
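<!-- Console for a single HAProxy backend, selected via the 'backend' URL parameter (available to the template as .Params.backend). -->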
{{ template "head" . }}
{{ template "prom_right_table_head" }}
<tr><th>{{ .Params.backend }}</th><th>{{ template "prom_query_drilldown" (args (printf "sum(min by (server)(haproxy_server_up{job='haproxy',backend='%s'}))" .Params.backend)) }} / {{ template "prom_query_drilldown" (args (printf "count(sum by (server)(haproxy_server_up{job='haproxy',backend='%s'}))" .Params.backend))}}</th></tr>
<tr>
<td>Responses</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(haproxy_backend_http_responses_total{job='haproxy',backend='%s'}[5m]))" .Params.backend) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
<td>Data In</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(haproxy_backend_bytes_in_total{job='haproxy',backend='%s'}[5m]))" .Params.backend) "B/s" "humanize") }}</td>
</tr>
<tr>
<td>Data Out</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(haproxy_backend_bytes_out_total{job='haproxy',backend='%s'}[5m]))" .Params.backend) "B/s" "humanize") }}</td>
</tr>
<tr>
<td>Current Sessions</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(haproxy_backend_current_sessions{job='haproxy',backend='%s'})" .Params.backend) "" "humanize") }}</td>
</tr>
<tr>
<td>Current Queue</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(haproxy_backend_current_queue{job='haproxy',backend='%s'})" .Params.backend) "" "humanize") }}</td>
</tr>
<tr><th colspan="2">Server Errors</th></tr>
<tr>
<td>Connection Errors</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(haproxy_backend_connection_errors_total{job='haproxy',backend='%s'}[5m]))" .Params.backend) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
<td>Response Errors</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(haproxy_backend_response_errors_total{job='haproxy',backend='%s'}[5m]))" .Params.backend) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
<td>Retry Warnings</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(haproxy_backend_retry_warnings_total{job='haproxy',backend='%s'}[5m]))" .Params.backend) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
{{ template "prom_right_table_tail" }}
{{ template "prom_content_head" . }}
<h1>HAProxy Backend - {{ .Params.backend }}</h1>
<h3>Responses</h3>
<div id="responsesGraph"></div>
<script>
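// Area graph of the per-second HTTP response rate for this backend, broken down by response code.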
new PromConsole.Graph({
node: document.querySelector("#responsesGraph"),
expr: "sum by (code)(irate(haproxy_backend_http_responses_total{job='haproxy',backend='{{ .Params.backend }}'}[5m]))",
renderer: 'area',
yTitle: 'Responses',
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: '/s'
})
</script>
{{ template "prom_content_tail" . }}
{{ template "tail" }}