2015-01-21 19:07:45 +00:00
|
|
|
// Copyright 2014 The Prometheus Authors
|
2014-03-28 10:58:47 +00:00
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package metric
|
|
|
|
|
|
|
|
import (
|
2015-03-30 16:12:51 +00:00
|
|
|
"fmt"
|
2014-03-28 10:58:47 +00:00
|
|
|
"regexp"
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
"strings"
|
2014-03-28 10:58:47 +00:00
|
|
|
|
2015-08-20 15:18:46 +00:00
|
|
|
"github.com/prometheus/common/model"
|
2014-03-28 10:58:47 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// MatchType is an enum for label matching types.
type MatchType int

// Possible MatchTypes.
const (
	Equal MatchType = iota
	NotEqual
	RegexMatch
	RegexNoMatch
)

// String returns the operator as it appears in a query expression:
// "=", "!=", "=~", or "!~". It panics on an unknown MatchType, which
// indicates a programmer error.
func (m MatchType) String() string {
	// A switch avoids allocating a fresh map on every call, which the
	// previous implementation did.
	switch m {
	case Equal:
		return "="
	case NotEqual:
		return "!="
	case RegexMatch:
		return "=~"
	case RegexNoMatch:
		return "!~"
	}
	panic("unknown match type")
}
|
|
|
|
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
// LabelMatchers is a slice of LabelMatcher objects. By implementing the
// sort.Interface, it is sortable by cardinality score, i.e. after sorting, the
// LabelMatcher that is expected to yield the fewest matches is first in the
// slice, and LabelMatchers that match the empty string are last.
type LabelMatchers []*LabelMatcher
|
|
|
|
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
func (lms LabelMatchers) Len() int { return len(lms) }
|
|
|
|
func (lms LabelMatchers) Swap(i, j int) { lms[i], lms[j] = lms[j], lms[i] }
|
|
|
|
func (lms LabelMatchers) Less(i, j int) bool { return lms[i].score < lms[j].score }
|
|
|
|
|
|
|
|
// LabelMatcher models the matching of a label. Create with NewLabelMatcher.
type LabelMatcher struct {
	// Type selects how Value is compared against a label value.
	Type MatchType
	// Name is the label name this matcher applies to.
	Name model.LabelName
	// Value is the literal value (Equal/NotEqual) or the regular
	// expression source (RegexMatch/RegexNoMatch) to compare against.
	Value model.LabelValue
	// re is the compiled, fully anchored regexp; it is only set for
	// RegexMatch and RegexNoMatch matchers (see NewLabelMatcher).
	re    *regexp.Regexp
	score float64 // Cardinality score, between 0 and 1, 0 is lowest cardinality.
}
|
|
|
|
|
|
|
|
// NewLabelMatcher returns a LabelMatcher object ready to use.
|
2015-08-20 15:18:46 +00:00
|
|
|
func NewLabelMatcher(matchType MatchType, name model.LabelName, value model.LabelValue) (*LabelMatcher, error) {
|
2014-03-28 10:58:47 +00:00
|
|
|
m := &LabelMatcher{
|
|
|
|
Type: matchType,
|
|
|
|
Name: name,
|
|
|
|
Value: value,
|
|
|
|
}
|
|
|
|
if matchType == RegexMatch || matchType == RegexNoMatch {
|
2015-11-05 10:23:43 +00:00
|
|
|
re, err := regexp.Compile("^(?:" + string(value) + ")$")
|
2014-03-28 10:58:47 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
m.re = re
|
|
|
|
}
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
m.calculateScore()
|
2014-03-28 10:58:47 +00:00
|
|
|
return m, nil
|
|
|
|
}
|
|
|
|
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
// calculateScore is a helper method only called in the constructor. It
// calculates the cardinality score upfront, so that sorting by it is faster and
// doesn't change internal state of the matcher.
//
// The score is based on a pretty bad but still quite helpful heuristics for
// now. Note that this is an interim solution until the work in progress to
// properly intersect matchers is complete. We intend to not invest any further
// effort into tweaking the score calculation, as this could easily devolve into
// a rabbit hole.
//
// The heuristics works along the following lines:
//
// - A matcher that is known to match nothing would have a score of 0. (This
//   case doesn't happen in the scope of this method.)
//
// - A matcher that matches the empty string has a score of 1.
//
// - Equal matchers have a score <= 0.5. The order in score for other matchers
//   are RegexMatch, RegexNoMatch, NotEqual.
//
// - There are a number of score adjustments for known "magic" parts, like
//   instance labels, metric names containing a colon (which are probably
//   recording rules) and such.
//
// - On top, there is a tiny adjustment for the length of the matcher, following
//   the blunt expectation that a long label name and/or value is more specific
//   and will therefore have a lower cardinality.
//
// To reiterate on the above: PLEASE RESIST THE TEMPTATION TO TWEAK THIS
// METHOD. IT IS "MAGIC" ENOUGH ALREADY AND WILL GO AWAY WITH THE UPCOMING MORE
// POWERFUL INDEXING.
func (m *LabelMatcher) calculateScore() {
	// Matching the empty string is the worst case, pinned to exactly 1
	// (MatchesEmptyString relies on this value).
	if m.Match("") {
		m.score = 1
		return
	}
	// lengthCorrection is between 0 (for length 0) and 0.1 (for length +Inf).
	lengthCorrection := 0.1 * (1 - 1/float64(len(m.Name)+len(m.Value)+1))
	// Base score per match type; Equal is considered most selective.
	switch m.Type {
	case Equal:
		m.score = 0.3 - lengthCorrection
	case RegexMatch:
		m.score = 0.6 - lengthCorrection
	case RegexNoMatch:
		m.score = 0.8 + lengthCorrection
	case NotEqual:
		m.score = 0.9 + lengthCorrection
	}
	if m.Type != Equal {
		// Don't bother anymore in this case.
		return
	}
	// Label-specific adjustments only apply to Equal matchers.
	switch m.Name {
	case model.InstanceLabel:
		// Matches only metrics from a single instance, which clearly
		// limits the damage.
		m.score -= 0.2
	case model.JobLabel:
		// The usual case is a relatively low number of jobs with many
		// metrics each.
		m.score += 0.1
	case model.BucketLabel, model.QuantileLabel:
		// Magic labels for buckets and quantiles will match copiously.
		m.score += 0.2
	case model.MetricNameLabel:
		if strings.Contains(string(m.Value), ":") {
			// Probably a recording rule with limited cardinality.
			m.score -= 0.1
			return
		}
		if m.Value == "up" || m.Value == "scrape_duration_seconds" {
			// Synthetic metrics which are contained in every scrape
			// exactly once. There might be less frequent metric
			// names, but the worst case is limited here, so give it
			// a bump.
			m.score -= 0.05
			return
		}
	}
}
|
|
|
|
|
|
|
|
// MatchesEmptyString returns true if the LabelMatcher matches the empty string.
|
|
|
|
func (m *LabelMatcher) MatchesEmptyString() bool {
|
|
|
|
return m.score >= 1
|
|
|
|
}
|
|
|
|
|
2015-03-30 16:12:51 +00:00
|
|
|
// String returns the matcher in the form it appears in a query expression,
// e.g. `job=~"foo.*"`.
func (m *LabelMatcher) String() string {
	return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value)
}
|
|
|
|
|
2014-03-28 10:58:47 +00:00
|
|
|
// Match returns true if the label matcher matches the supplied label value.
|
2015-08-20 15:18:46 +00:00
|
|
|
func (m *LabelMatcher) Match(v model.LabelValue) bool {
|
2014-03-28 10:58:47 +00:00
|
|
|
switch m.Type {
|
|
|
|
case Equal:
|
|
|
|
return m.Value == v
|
|
|
|
case NotEqual:
|
|
|
|
return m.Value != v
|
|
|
|
case RegexMatch:
|
|
|
|
return m.re.MatchString(string(v))
|
|
|
|
case RegexNoMatch:
|
|
|
|
return !m.re.MatchString(string(v))
|
|
|
|
default:
|
|
|
|
panic("invalid match type")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Filter takes a list of label values and returns all label values which match
|
|
|
|
// the label matcher.
|
2015-08-20 15:18:46 +00:00
|
|
|
func (m *LabelMatcher) Filter(in model.LabelValues) model.LabelValues {
|
|
|
|
out := model.LabelValues{}
|
2014-03-28 10:58:47 +00:00
|
|
|
for _, v := range in {
|
|
|
|
if m.Match(v) {
|
|
|
|
out = append(out, v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return out
|
|
|
|
}
|