diff --git a/.travis.yml b/.travis.yml
index bb704e9cd4..018bdabc2f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,5 +1,3 @@
-sudo: false
-
 language: go
 
 # Whenever the Go version is updated here, .circleci/config.yml and .promu.yml
diff --git a/Makefile.common b/Makefile.common
index fff85f9226..a422e1b69a 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -36,7 +36,8 @@ GO_VERSION        ?= $(shell $(GO) version)
 GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
 
-unexport GOVENDOR
+GOVENDOR :=
+GO111MODULE :=
 ifeq (, $(PRE_GO_111))
 	ifneq (,$(wildcard go.mod))
 		# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
@@ -57,8 +58,6 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$(
 		# This repository isn't using Go modules (yet).
 		GOVENDOR := $(FIRST_GOPATH)/bin/govendor
 	endif
-
-	unexport GO111MODULE
 endif
 PROMU        := $(FIRST_GOPATH)/bin/promu
 STATICCHECK  := $(FIRST_GOPATH)/bin/staticcheck
diff --git a/RELEASE.md b/RELEASE.md
index f889424f51..cc5eb42755 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -12,8 +12,9 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.5           | 2018-10-24                                 | Frederic Branczyk (GitHub: @brancz)         |
 | v2.6           | 2018-12-05                                 | Simon Pasquier (GitHub: @simonpasquier)     |
 | v2.7           | 2019-01-16                                 | Goutham Veeramachaneni (GitHub: @gouthamve) |
-| v2.8           | 2019-02-27                                 | **searching for volunteer**                 |
+| v2.8           | 2019-02-27                                 | Ganesh Vernekar (GitHub: @codesome)         |
 | v2.9           | 2019-04-10                                 | **searching for volunteer**                 |
+| v2.10          | 2019-05-22                                 | **searching for volunteer**                 |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
 
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 045ec53777..79675ec831 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -307,7 +307,7 @@ func main() {
 
 	var (
 		localStorage  = &tsdb.ReadyStorage{}
-		remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), localStorage.StartTime, time.Duration(cfg.RemoteFlushDeadline))
+		remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, cfg.localStoragePath, time.Duration(cfg.RemoteFlushDeadline))
 		fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
 	)
 
diff --git a/config/config.go b/config/config.go
index d12c8d651d..004d58a855 100644
--- a/config/config.go
+++ b/config/config.go
@@ -107,9 +107,10 @@ var (
 		MinShards:         1,
 		MaxSamplesPerSend: 100,
 
-		// By default, buffer 100 batches, which at 100ms per batch is 10s. At
-		// 1000 shards, this will buffer 10M samples total.
-		Capacity:          100 * 100,
+		// Each shard will have a max of 10 samples pending in its channel, plus the pending
+		// samples that have been enqueued. Theoretically we should only ever have about 110 samples
+		// per shard pending. At 1000 shards that's 110k.
+		Capacity:          10,
 		BatchSendDeadline: model.Duration(5 * time.Second),
 
 		// Max number of times to retry a batch on recoverable errors.
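
To make the new default concrete, here is a minimal sketch of the arithmetic the comment above describes, assuming the defaults in this file (Capacity = 10, MaxSamplesPerSend = 100); the constants are copied here purely for illustration:

```go
package main

import "fmt"

func main() {
	const (
		capacity          = 10   // samples buffered in each shard's channel
		maxSamplesPerSend = 100  // samples a shard may hold in its pending batch
		shards            = 1000 // worst-case shard count from the comment above
	)
	perShard := capacity + maxSamplesPerSend
	fmt.Printf("max pending per shard: %d\n", perShard)                   // 110
	fmt.Printf("max pending at %d shards: %d\n", shards, shards*perShard) // 110000
}
```
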
diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go
index d6a92f3aff..242ac146da 100644
--- a/discovery/triton/triton.go
+++ b/discovery/triton/triton.go
@@ -215,7 +215,7 @@ func (d *Discovery) refresh() (tg *targetgroup.Group, err error) {
 	dr := DiscoveryResponse{}
 	err = json.Unmarshal(data, &dr)
 	if err != nil {
-		return tg, fmt.Errorf("an error occurred unmarshaling the disovery response json. %s", err)
+		return tg, fmt.Errorf("an error occurred unmarshaling the discovery response json. %s", err)
 	}
 
 	for _, container := range dr.Containers {
diff --git a/pkg/labels/labels.go b/pkg/labels/labels.go
index a10f636d78..bfc0ce5860 100644
--- a/pkg/labels/labels.go
+++ b/pkg/labels/labels.go
@@ -81,6 +81,25 @@ func (ls *Labels) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// MatchLabels returns a subset of Labels that matches/does not match the provided label names, depending on the 'on' boolean.
+// If 'on' is true, it returns the labels whose names are in the provided set; if 'on' is false, it returns the inverse.
+func (ls Labels) MatchLabels(on bool, names ...string) Labels {
+	matchedLabels := Labels{}
+
+	nameSet := map[string]struct{}{}
+	for _, n := range names {
+		nameSet[n] = struct{}{}
+	}
+
+	for _, v := range ls {
+		if _, ok := nameSet[v.Name]; on == ok {
+			matchedLabels = append(matchedLabels, v)
+		}
+	}
+
+	return matchedLabels
+}
+
 // Hash returns a hash value for the label set.
 func (ls Labels) Hash() uint64 {
 	b := make([]byte, 0, 1024)
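
A minimal usage sketch of the new helper; the label values and the outputs in the comments are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

func main() {
	ls := labels.FromStrings("__name__", "up", "instance", "0", "job", "app-server")

	// on=true keeps only the named labels.
	fmt.Println(ls.MatchLabels(true, "job")) // {job="app-server"}

	// on=false keeps everything except the named labels.
	fmt.Println(ls.MatchLabels(false, "job")) // {__name__="up", instance="0"}
}
```
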
diff --git a/pkg/labels/labels_test.go b/pkg/labels/labels_test.go
new file mode 100644
index 0000000000..3651ae5f1e
--- /dev/null
+++ b/pkg/labels/labels_test.go
@@ -0,0 +1,104 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"testing"
+)
+
+func TestLabels_MatchLabels(t *testing.T) {
+	labels := Labels{
+		{
+			Name:  "__name__",
+			Value: "ALERTS",
+		},
+		{
+			Name:  "alertname",
+			Value: "HTTPRequestRateLow",
+		},
+		{
+			Name:  "alertstate",
+			Value: "pending",
+		},
+		{
+			Name:  "instance",
+			Value: "0",
+		},
+		{
+			Name:  "job",
+			Value: "app-server",
+		},
+		{
+			Name:  "severity",
+			Value: "critical",
+		},
+	}
+
+	providedNames := []string{
+		"__name__",
+		"alertname",
+		"alertstate",
+		"instance",
+	}
+
+	got := labels.MatchLabels(true, providedNames...)
+	expected := Labels{
+		{
+			Name:  "__name__",
+			Value: "ALERTS",
+		},
+		{
+			Name:  "alertname",
+			Value: "HTTPRequestRateLow",
+		},
+		{
+			Name:  "alertstate",
+			Value: "pending",
+		},
+		{
+			Name:  "instance",
+			Value: "0",
+		},
+	}
+
+	assertSlice(t, got, expected)
+
+	// Now try with 'on' set to false.
+	got = labels.MatchLabels(false, providedNames...)
+
+	expected = Labels{
+		{
+			Name:  "job",
+			Value: "app-server",
+		},
+		{
+			Name:  "severity",
+			Value: "critical",
+		},
+	}
+
+	assertSlice(t, got, expected)
+}
+
+func assertSlice(t *testing.T, got, expected Labels) {
+	if len(expected) != len(got) {
+		t.Errorf("expected the length of matched label names to be %d, but got %d", len(expected), len(got))
+	}
+
+	for i, expectedLabel := range expected {
+		if expectedLabel.Name != got[i].Name {
+			t.Errorf("expected to get Label with name %s, but got %s instead", expectedLabel.Name, got[i].Name)
+		}
+	}
+}
diff --git a/promql/engine.go b/promql/engine.go
index f405724d3b..7450dcd389 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -1434,9 +1434,16 @@ func (ev *evaluator) VectorBinop(op ItemType, lhs, rhs Vector, matching *VectorM
 		sig := sigf(rs.Metric)
 		// The rhs is guaranteed to be the 'one' side. Having multiple samples
 		// with the same signature means that the matching is many-to-many.
-		if _, found := rightSigs[sig]; found {
+		if duplSample, found := rightSigs[sig]; found {
+			// oneSide indicates which side of the vector is the 'one' in the many-to-one relationship.
+			oneSide := "right"
+			if matching.Card == CardOneToMany {
+				oneSide = "left"
+			}
+			matchedLabels := rs.Metric.MatchLabels(matching.On, matching.MatchingLabels...)
 			// Many-to-many matching not allowed.
-			ev.errorf("many-to-many matching not allowed: matching labels must be unique on one side")
+			ev.errorf("found duplicate series for the match group %s on the %s-hand side of the operation: [%s, %s]"+
+				"; many-to-many matching not allowed: matching labels must be unique on one side", matchedLabels.String(), oneSide, rs.Metric.String(), duplSample.Metric.String())
 		}
 		rightSigs[sig] = rs
 	}
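
For illustration, a small standalone sketch of how the improved message renders; the match group, side, and series names here are hypothetical:

```go
package main

import "fmt"

func main() {
	// Hypothetical inputs: the right-hand side holds two series with the
	// same signature for the match group {instance="0"}.
	matchedLabels := `{instance="0"}`
	oneSide := "right"
	dup1 := `requests_total{instance="0", job="api"}`
	dup2 := `requests_total{instance="0", job="app"}`

	err := fmt.Errorf("found duplicate series for the match group %s on the %s-hand side of the operation: [%s, %s]"+
		"; many-to-many matching not allowed: matching labels must be unique on one side", matchedLabels, oneSide, dup1, dup2)
	fmt.Println(err)
}
```
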
diff --git a/scrape/manager.go b/scrape/manager.go
index 4e38b69961..65ca1dc2b8 100644
--- a/scrape/manager.go
+++ b/scrape/manager.go
@@ -14,6 +14,7 @@
 package scrape
 
 import (
+	"fmt"
 	"reflect"
 	"sync"
 	"time"
@@ -104,18 +105,18 @@ func (m *Manager) reload() {
 	m.mtxScrape.Lock()
 	var wg sync.WaitGroup
 	for setName, groups := range m.targetSets {
-		var sp *scrapePool
-		existing, ok := m.scrapePools[setName]
-		if !ok {
+		if _, ok := m.scrapePools[setName]; !ok {
 			scrapeConfig, ok := m.scrapeConfigs[setName]
 			if !ok {
 				level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
 				continue
 			}
-			sp = newScrapePool(scrapeConfig, m.append, log.With(m.logger, "scrape_pool", setName))
+			sp, err := newScrapePool(scrapeConfig, m.append, log.With(m.logger, "scrape_pool", setName))
+			if err != nil {
+				level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
+				continue
+			}
 			m.scrapePools[setName] = sp
-		} else {
-			sp = existing
 		}
 
 		wg.Add(1)
@@ -123,7 +124,7 @@ func (m *Manager) reload() {
 		go func(sp *scrapePool, groups []*targetgroup.Group) {
 			sp.Sync(groups)
 			wg.Done()
-		}(sp, groups)
+		}(m.scrapePools[setName], groups)
 
 	}
 	m.mtxScrape.Unlock()
@@ -158,16 +159,24 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
 	}
 	m.scrapeConfigs = c
 
-	// Cleanup and reload pool if config has changed.
+	// Cleanup and reload pool if the configuration has changed.
+	var failed bool
 	for name, sp := range m.scrapePools {
 		if cfg, ok := m.scrapeConfigs[name]; !ok {
 			sp.stop()
 			delete(m.scrapePools, name)
 		} else if !reflect.DeepEqual(sp.config, cfg) {
-			sp.reload(cfg)
+			err := sp.reload(cfg)
+			if err != nil {
+				level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name)
+				failed = true
+			}
 		}
 	}
 
+	if failed {
+		return fmt.Errorf("failed to apply the new configuration")
+	}
 	return nil
 }
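
Since ApplyConfig can now fail, callers gain a decision point. A hedged sketch of the caller-side contract, with a hypothetical reloadConfig helper; note that pools whose reload failed keep running with their previous configuration:

```go
package main

import (
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/scrape"
)

// reloadConfig is a hypothetical helper: ApplyConfig now reports reload
// failures instead of only logging them, so the caller can surface the error.
func reloadConfig(m *scrape.Manager, cfg *config.Config, logger log.Logger) error {
	if err := m.ApplyConfig(cfg); err != nil {
		level.Error(logger).Log("msg", "applying scrape configuration failed", "err", err)
		return err
	}
	return nil
}
```
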
 
diff --git a/scrape/manager_test.go b/scrape/manager_test.go
index b548ddd9c9..499d13885d 100644
--- a/scrape/manager_test.go
+++ b/scrape/manager_test.go
@@ -222,47 +222,115 @@ func TestPopulateLabels(t *testing.T) {
 	}
 }
 
-// TestScrapeManagerReloadNoChange tests that no scrape reload happens when there is no config change.
-func TestManagerReloadNoChange(t *testing.T) {
-	tsetName := "test"
+func loadConfiguration(t *testing.T, c string) *config.Config {
+	t.Helper()
 
-	cfgText := `
+	cfg := &config.Config{}
+	if err := yaml.UnmarshalStrict([]byte(c), cfg); err != nil {
+		t.Fatalf("Unable to load YAML config: %s", err)
+	}
+	return cfg
+}
+
+func noopLoop() loop {
+	return &testLoop{
+		startFunc: func(interval, timeout time.Duration, errc chan<- error) {},
+		stopFunc:  func() {},
+	}
+}
+
+func TestManagerApplyConfig(t *testing.T) {
+	// Valid initial configuration.
+	cfgText1 := `
 scrape_configs:
- - job_name: '` + tsetName + `'
+ - job_name: job1
    static_configs:
    - targets: ["foo:9090"]
-   - targets: ["bar:9090"]
 `
-	cfg := &config.Config{}
-	if err := yaml.UnmarshalStrict([]byte(cfgText), cfg); err != nil {
-		t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
-	}
+	// Invalid configuration.
+	cfgText2 := `
+scrape_configs:
+ - job_name: job1
+   scheme: https
+   static_configs:
+   - targets: ["foo:9090"]
+   tls_config:
+     ca_file: /not/existing/ca/file
+`
+	// Valid configuration.
+	cfgText3 := `
+scrape_configs:
+ - job_name: job1
+   scheme: https
+   static_configs:
+   - targets: ["foo:9090"]
+`
+	var (
+		cfg1 = loadConfiguration(t, cfgText1)
+		cfg2 = loadConfiguration(t, cfgText2)
+		cfg3 = loadConfiguration(t, cfgText3)
+
+		ch = make(chan struct{}, 1)
+	)
 
 	scrapeManager := NewManager(nil, nil)
-	// Load the current config.
-	scrapeManager.ApplyConfig(cfg)
-
-	// As reload never happens, new loop should never be called.
 	newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*relabel.Config) loop {
-		t.Fatal("reload happened")
-		return nil
+		ch <- struct{}{}
+		return noopLoop()
 	}
-
 	sp := &scrapePool{
 		appendable:    &nopAppendable{},
 		activeTargets: map[uint64]*Target{},
 		loops: map[uint64]loop{
-			1: &testLoop{},
+			1: noopLoop(),
 		},
 		newLoop: newLoop,
 		logger:  nil,
-		config:  cfg.ScrapeConfigs[0],
+		config:  cfg1.ScrapeConfigs[0],
 	}
 	scrapeManager.scrapePools = map[string]*scrapePool{
-		tsetName: sp,
+		"job1": sp,
 	}
 
-	scrapeManager.ApplyConfig(cfg)
+	// Apply the initial configuration.
+	if err := scrapeManager.ApplyConfig(cfg1); err != nil {
+		t.Fatalf("unable to apply configuration: %s", err)
+	}
+	select {
+	case <-ch:
+		t.Fatal("reload happened")
+	default:
+	}
+
+	// Apply a configuration for which the reload fails.
+	if err := scrapeManager.ApplyConfig(cfg2); err == nil {
+		t.Fatalf("expecting error but got none")
+	}
+	select {
+	case <-ch:
+		t.Fatal("reload happened")
+	default:
+	}
+
+	// Apply a configuration for which the reload succeeds.
+	if err := scrapeManager.ApplyConfig(cfg3); err != nil {
+		t.Fatalf("unable to apply configuration: %s", err)
+	}
+	select {
+	case <-ch:
+	default:
+		t.Fatal("reload didn't happen")
+	}
+
+	// Re-applying the same configuration shouldn't trigger a reload.
+	if err := scrapeManager.ApplyConfig(cfg3); err != nil {
+		t.Fatalf("unable to apply configuration: %s", err)
+	}
+	select {
+	case <-ch:
+		t.Fatal("reload happened")
+	default:
+	}
 }
 
 func TestManagerTargetsUpdates(t *testing.T) {
diff --git a/scrape/scrape.go b/scrape/scrape.go
index 29553aea20..d21bc04bcc 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -28,6 +28,7 @@ import (
 
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -61,6 +62,30 @@ var (
 		},
 		[]string{"interval"},
 	)
+	targetScrapePools = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrape_pools_total",
+			Help: "Total number of scrape pool creation attempts.",
+		},
+	)
+	targetScrapePoolsFailed = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrape_pools_failed_total",
+			Help: "Total number of scrape pool creations that failed.",
+		},
+	)
+	targetScrapePoolReloads = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrape_pool_reloads_total",
+			Help: "Total number of scrape loop reloads.",
+		},
+	)
+	targetScrapePoolReloadsFailed = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrape_pool_reloads_failed_total",
+			Help: "Total number of failed scrape loop reloads.",
+		},
+	)
 	targetSyncIntervalLength = prometheus.NewSummaryVec(
 		prometheus.SummaryOpts{
 			Name:       "prometheus_target_sync_length_seconds",
@@ -105,6 +130,10 @@ var (
 func init() {
 	prometheus.MustRegister(targetIntervalLength)
 	prometheus.MustRegister(targetReloadIntervalLength)
+	prometheus.MustRegister(targetScrapePools)
+	prometheus.MustRegister(targetScrapePoolsFailed)
+	prometheus.MustRegister(targetScrapePoolReloads)
+	prometheus.MustRegister(targetScrapePoolReloadsFailed)
 	prometheus.MustRegister(targetSyncIntervalLength)
 	prometheus.MustRegister(targetScrapePoolSyncsCounter)
 	prometheus.MustRegister(targetScrapeSampleLimit)
@@ -136,15 +165,16 @@ const maxAheadTime = 10 * time.Minute
 
 type labelsMutator func(labels.Labels) labels.Labels
 
-func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger) *scrapePool {
+func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger) (*scrapePool, error) {
+	targetScrapePools.Inc()
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
 
 	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName)
 	if err != nil {
-		// Any errors that could occur here should be caught during config validation.
-		level.Error(logger).Log("msg", "Error creating HTTP client", "err", err)
+		targetScrapePoolsFailed.Inc()
+		return nil, errors.Wrap(err, "error creating HTTP client")
 	}
 
 	buffers := pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
@@ -182,7 +212,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger)
 		)
 	}
 
-	return sp
+	return sp, nil
 }
 
 func (sp *scrapePool) ActiveTargets() []*Target {
@@ -227,7 +257,8 @@ func (sp *scrapePool) stop() {
 // reload the scrape pool with the given scrape configuration. The target state is preserved
 // but all scrape loops are restarted with the new scrape configuration.
 // This method returns after all scrape loops that were stopped have stopped scraping.
-func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
+func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
+	targetScrapePoolReloads.Inc()
 	start := time.Now()
 
 	sp.mtx.Lock()
@@ -235,8 +266,8 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 
 	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName)
 	if err != nil {
-		// Any errors that could occur here should be caught during config validation.
-		level.Error(sp.logger).Log("msg", "Error creating HTTP client", "err", err)
+		targetScrapePoolReloadsFailed.Inc()
+		return errors.Wrap(err, "error creating HTTP client")
 	}
 	sp.config = cfg
 	sp.client = client
@@ -272,6 +303,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 	targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
 		time.Since(start).Seconds(),
 	)
+	return nil
 }
 
 // Sync converts target groups into actual scrape targets and synchronizes
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index a27ccb0aa2..c301fdbb77 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -29,16 +29,14 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/prometheus/pkg/relabel"
-
+	dto "github.com/prometheus/client_model/go"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
-	dto "github.com/prometheus/client_model/go"
-
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/relabel"
 	"github.com/prometheus/prometheus/pkg/textparse"
 	"github.com/prometheus/prometheus/pkg/timestamp"
 	"github.com/prometheus/prometheus/pkg/value"
@@ -48,9 +46,9 @@ import (
 
 func TestNewScrapePool(t *testing.T) {
 	var (
-		app = &nopAppendable{}
-		cfg = &config.ScrapeConfig{}
-		sp  = newScrapePool(cfg, app, nil)
+		app   = &nopAppendable{}
+		cfg   = &config.ScrapeConfig{}
+		sp, _ = newScrapePool(cfg, app, nil)
 	)
 
 	if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
@@ -85,7 +83,7 @@ func TestDroppedTargetsList(t *testing.T) {
 				},
 			},
 		}
-		sp                     = newScrapePool(cfg, app, nil)
+		sp, _                  = newScrapePool(cfg, app, nil)
 		expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __metrics_path__=\"\", __scheme__=\"\", job=\"dropMe\"}"
 		expectedLength         = 1
 	)
@@ -307,7 +305,7 @@ func TestScrapePoolReload(t *testing.T) {
 func TestScrapePoolAppender(t *testing.T) {
 	cfg := &config.ScrapeConfig{}
 	app := &nopAppendable{}
-	sp := newScrapePool(cfg, app, nil)
+	sp, _ := newScrapePool(cfg, app, nil)
 
 	loop := sp.newLoop(&Target{}, nil, 0, false, nil)
 	appl, ok := loop.(*scrapeLoop)
@@ -350,7 +348,7 @@ func TestScrapePoolRaces(t *testing.T) {
 	newConfig := func() *config.ScrapeConfig {
 		return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
 	}
-	sp := newScrapePool(newConfig(), &nopAppendable{}, nil)
+	sp, _ := newScrapePool(newConfig(), &nopAppendable{}, nil)
 	tgts := []*targetgroup.Group{
 		{
 			Targets: []model.LabelSet{
@@ -880,7 +878,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
 		t.Fatalf("Did not see expected sample limit error: %s", err)
 	}
 
-	// Check that the Counter has been incremented a simgle time for the scrape,
+	// Check that the Counter has been incremented a single time for the scrape,
 	// not multiple times for each sample.
 	metric := dto.Metric{}
 	err = targetScrapeSampleLimit.Write(&metric)
diff --git a/storage/remote/client.go b/storage/remote/client.go
index 2679f42b6b..b182cda614 100644
--- a/storage/remote/client.go
+++ b/storage/remote/client.go
@@ -70,15 +70,10 @@ type recoverableError struct {
 	error
 }
 
-// Store sends a batch of samples to the HTTP endpoint.
-func (c *Client) Store(ctx context.Context, req *prompb.WriteRequest) error {
-	data, err := proto.Marshal(req)
-	if err != nil {
-		return err
-	}
-
-	compressed := snappy.Encode(nil, data)
-	httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed))
+// Store sends a batch of samples to the HTTP endpoint; the request must contain the
+// proto-marshalled and snappy-encoded bytes (see buildWriteRequest in queue_manager.go).
+func (c *Client) Store(ctx context.Context, req []byte) error {
+	httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req))
 	if err != nil {
 		// Errors from NewRequest are from unparseable URLs, so are not
 		// recoverable.
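
Because Store now takes raw bytes, serialization moves to the caller. A minimal sketch of the caller-side contract, mirroring buildWriteRequest in queue_manager.go; storeExample is a hypothetical helper:

```go
package main

import (
	"context"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage/remote"
)

// storeExample marshals a WriteRequest, snappy-compresses it, and hands the
// raw bytes to Store, as the queue manager now does.
func storeExample(ctx context.Context, c *remote.Client, samples []prompb.TimeSeries) error {
	data, err := proto.Marshal(&prompb.WriteRequest{Timeseries: samples})
	if err != nil {
		return err // a marshalling failure is non-recoverable
	}
	return c.Store(ctx, snappy.Encode(nil, data))
}
```
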
diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go
index 73ec875a5d..9ef3c9bd26 100644
--- a/storage/remote/client_test.go
+++ b/storage/remote/client_test.go
@@ -26,7 +26,6 @@ import (
 
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/prompb"
 )
 
 var longErrMessage = strings.Repeat("error message", maxErrMsgLen)
@@ -74,7 +73,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
 			t.Fatal(err)
 		}
 
-		err = c.Store(context.Background(), &prompb.WriteRequest{})
+		err = c.Store(context.Background(), []byte{})
 		if !reflect.DeepEqual(err, test.err) {
 			t.Errorf("%d. Unexpected error; want %v, got %v", i, test.err, err)
 		}
diff --git a/storage/remote/codec.go b/storage/remote/codec.go
index 6f49bfbbaa..82dd4126e4 100644
--- a/storage/remote/codec.go
+++ b/storage/remote/codec.go
@@ -80,28 +80,6 @@ func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error
 	return err
 }
 
-// ToWriteRequest converts an array of samples into a WriteRequest proto.
-func ToWriteRequest(samples []*model.Sample) *prompb.WriteRequest {
-	req := &prompb.WriteRequest{
-		Timeseries: make([]prompb.TimeSeries, 0, len(samples)),
-	}
-
-	for _, s := range samples {
-		ts := prompb.TimeSeries{
-			Labels: MetricToLabelProtos(s.Metric),
-			Samples: []prompb.Sample{
-				{
-					Value:     float64(s.Value),
-					Timestamp: int64(s.Timestamp),
-				},
-			},
-		}
-		req.Timeseries = append(req.Timeseries, ts)
-	}
-
-	return req
-}
-
 // ToQuery builds a Query proto.
 func ToQuery(from, to int64, matchers []*labels.Matcher, p *storage.SelectParams) (*prompb.Query, error) {
 	ms, err := toLabelMatchers(matchers)
@@ -364,21 +342,6 @@ func fromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro
 	return result, nil
 }
 
-// MetricToLabelProtos builds a []*prompb.Label from a model.Metric
-func MetricToLabelProtos(metric model.Metric) []prompb.Label {
-	labels := make([]prompb.Label, 0, len(metric))
-	for k, v := range metric {
-		labels = append(labels, prompb.Label{
-			Name:  string(k),
-			Value: string(v),
-		})
-	}
-	sort.Slice(labels, func(i int, j int) bool {
-		return labels[i].Name < labels[j].Name
-	})
-	return labels
-}
-
 // LabelProtosToMetric unpack a []*prompb.Label to a model.Metric
 func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
 	metric := make(model.Metric, len(labelPairs))
@@ -400,6 +363,26 @@ func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels {
 	return result
 }
 
+func labelsetToLabelsProto(ls model.LabelSet) []prompb.Label {
+	result := make([]prompb.Label, 0, len(ls))
+	keys := make([]string, 0, len(ls))
+
+	for k := range ls {
+		keys = append(keys, string(k))
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		ln := model.LabelName(k)
+		result = append(result, prompb.Label{
+			Name:  k,
+			Value: string(ls[ln]),
+		})
+	}
+
+	return result
+}
+
 func labelsToLabelsProto(labels labels.Labels) []prompb.Label {
 	result := make([]prompb.Label, 0, len(labels))
 	for _, l := range labels {
@@ -410,11 +393,3 @@ func labelsToLabelsProto(labels labels.Labels) []prompb.Label {
 	}
 	return result
 }
-
-func labelsToMetric(ls labels.Labels) model.Metric {
-	metric := make(model.Metric, len(ls))
-	for _, l := range ls {
-		metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-	}
-	return metric
-}
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index e76912e1b9..e91c6e86de 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -16,6 +16,7 @@ package remote
 import (
 	"context"
 	"math"
+	"strconv"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -24,12 +25,16 @@ import (
 
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
+	"github.com/gogo/protobuf/proto"
+	"github.com/golang/snappy"
+
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	pkgrelabel "github.com/prometheus/prometheus/pkg/relabel"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/relabel"
+	"github.com/prometheus/tsdb"
 )
 
 // String constants for instrumentation.
@@ -66,7 +71,16 @@ var (
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "failed_samples_total",
-			Help:      "Total number of samples which failed on send to remote storage.",
+			Help:      "Total number of samples which failed on send to remote storage due to non-recoverable errors.",
+		},
+		[]string{queue},
+	)
+	retriedSamplesTotal = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "retried_samples_total",
+			Help:      "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable.",
 		},
 		[]string{queue},
 	)
@@ -75,7 +89,16 @@ var (
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "dropped_samples_total",
-			Help:      "Total number of samples which were dropped due to the queue being full.",
+			Help:      "Total number of samples which were dropped after being read from the WAL before being sent via remote write.",
+		},
+		[]string{queue},
+	)
+	enqueueRetriesTotal = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "enqueue_retries_total",
+			Help:      "Total number of times enqueue has failed because a shard's queue was full.",
 		},
 		[]string{queue},
 	)
@@ -89,12 +112,30 @@ var (
 		},
 		[]string{queue},
 	)
-	queueLength = prometheus.NewGaugeVec(
+	queueLastSendTimestamp = prometheus.NewGaugeVec(
 		prometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
-			Name:      "queue_length",
-			Help:      "The number of processed samples queued to be sent to the remote storage.",
+			Name:      "queue_last_send_timestamp",
+			Help:      "Timestamp of the last successful send by this queue.",
+		},
+		[]string{queue},
+	)
+	queueHighestSentTimestamp = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "queue_highest_sent_timestamp",
+			Help:      "Timestamp of the highest WAL sample successfully sent by this queue.",
+		},
+		[]string{queue},
+	)
+	queuePendingSamples = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "pending_samples",
+			Help:      "The number of samples pending in the queue's shards to be sent to the remote storage.",
 		},
 		[]string{queue},
 	)
@@ -121,9 +162,13 @@ var (
 func init() {
 	prometheus.MustRegister(succeededSamplesTotal)
 	prometheus.MustRegister(failedSamplesTotal)
+	prometheus.MustRegister(retriedSamplesTotal)
 	prometheus.MustRegister(droppedSamplesTotal)
+	prometheus.MustRegister(enqueueRetriesTotal)
 	prometheus.MustRegister(sentBatchDuration)
-	prometheus.MustRegister(queueLength)
+	prometheus.MustRegister(queueLastSendTimestamp)
+	prometheus.MustRegister(queueHighestSentTimestamp)
+	prometheus.MustRegister(queuePendingSamples)
 	prometheus.MustRegister(shardCapacity)
 	prometheus.MustRegister(numShards)
 }
@@ -132,25 +177,41 @@ func init() {
 // external timeseries database.
 type StorageClient interface {
 	// Store stores the given samples in the remote storage.
-	Store(context.Context, *prompb.WriteRequest) error
+	Store(context.Context, []byte) error
 	// Name identifies the remote storage implementation.
 	Name() string
 }
 
 // QueueManager manages a queue of samples to be sent to the Storage
-// indicated by the provided StorageClient.
+// indicated by the provided StorageClient. It implements the writeTo interface
+// used by the WAL Watcher.
 type QueueManager struct {
 	logger log.Logger
 
-	flushDeadline  time.Duration
-	cfg            config.QueueConfig
-	externalLabels model.LabelSet
-	relabelConfigs []*pkgrelabel.Config
-	client         StorageClient
-	queueName      string
-	logLimiter     *rate.Limiter
+	flushDeadline              time.Duration
+	cfg                        config.QueueConfig
+	externalLabels             model.LabelSet
+	relabelConfigs             []*pkgrelabel.Config
+	client                     StorageClient
+	queueName                  string
+	logLimiter                 *rate.Limiter
+	watcher                    *WALWatcher
+	lastSendTimestampMetric    prometheus.Gauge
+	highestSentTimestampMetric prometheus.Gauge
+	pendingSamplesMetric       prometheus.Gauge
+	enqueueRetriesMetric       prometheus.Counter
+
+	lastSendTimestamp    int64
+	highestSentTimestamp int64
+	timestampLock        sync.Mutex
+
+	highestTimestampIn *int64 // highest timestamp of any sample ingested by remote storage via scrape (Appender)
+
+	seriesMtx            sync.Mutex
+	seriesLabels         map[uint64][]prompb.Label
+	seriesSegmentIndexes map[uint64]int
+	droppedSeries        map[uint64]struct{}
 
-	shardsMtx   sync.RWMutex
 	shards      *shards
 	numShards   int
 	reshardChan chan int
@@ -162,7 +223,7 @@ type QueueManager struct {
 }
 
 // NewQueueManager builds a new QueueManager.
-func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels model.LabelSet, relabelConfigs []*pkgrelabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {
+func NewQueueManager(logger log.Logger, walDir string, samplesIn *ewmaRate, highestTimestampIn *int64, cfg config.QueueConfig, externalLabels model.LabelSet, relabelConfigs []*pkgrelabel.Config, client StorageClient, flushDeadline time.Duration, startTime int64) *QueueManager {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	} else {
@@ -177,16 +238,29 @@ func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels m
 		client:         client,
 		queueName:      client.Name(),
 
+		highestTimestampIn: highestTimestampIn,
+
+		seriesLabels:         make(map[uint64][]prompb.Label),
+		seriesSegmentIndexes: make(map[uint64]int),
+		droppedSeries:        make(map[uint64]struct{}),
+
 		logLimiter:  rate.NewLimiter(logRateLimit, logBurst),
 		numShards:   cfg.MinShards,
 		reshardChan: make(chan int),
 		quit:        make(chan struct{}),
 
-		samplesIn:          newEWMARate(ewmaWeight, shardUpdateDuration),
+		samplesIn:          samplesIn,
 		samplesOut:         newEWMARate(ewmaWeight, shardUpdateDuration),
 		samplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),
 	}
-	t.shards = t.newShards(t.numShards)
+
+	t.lastSendTimestampMetric = queueLastSendTimestamp.WithLabelValues(t.queueName)
+	t.highestSentTimestampMetric = queueHighestSentTimestamp.WithLabelValues(t.queueName)
+	t.pendingSamplesMetric = queuePendingSamples.WithLabelValues(t.queueName)
+	t.enqueueRetriesMetric = enqueueRetriesTotal.WithLabelValues(t.queueName)
+	t.watcher = NewWALWatcher(logger, client.Name(), t, walDir, startTime)
+	t.shards = t.newShards()
+
 	numShards.WithLabelValues(t.queueName).Set(float64(t.numShards))
 	shardCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))
 
@@ -195,76 +269,144 @@ func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels m
 	succeededSamplesTotal.WithLabelValues(t.queueName)
 	failedSamplesTotal.WithLabelValues(t.queueName)
 	droppedSamplesTotal.WithLabelValues(t.queueName)
+	retriedSamplesTotal.WithLabelValues(t.queueName)
+	// Reset pending samples metric to 0.
+	t.pendingSamplesMetric.Set(0)
 
 	return t
 }
 
-// Append queues a sample to be sent to the remote storage. It drops the
-// sample on the floor if the queue is full.
-// Always returns nil.
-func (t *QueueManager) Append(s *model.Sample) error {
-	snew := *s
-	snew.Metric = s.Metric.Clone()
+// Append queues the given samples to be sent to the remote storage. It blocks until all
+// samples are enqueued on their shards or a shutdown signal is received.
+func (t *QueueManager) Append(s []tsdb.RefSample) bool {
+	type enqueuable struct {
+		ts  prompb.TimeSeries
+		ref uint64
+	}
 
-	for ln, lv := range t.externalLabels {
-		if _, ok := s.Metric[ln]; !ok {
-			snew.Metric[ln] = lv
+	tempSamples := make([]enqueuable, 0, len(s))
+	t.seriesMtx.Lock()
+	for _, sample := range s {
+		// If we have no labels for the series, due to relabelling or otherwise, don't send the sample.
+		if _, ok := t.seriesLabels[sample.Ref]; !ok {
+			droppedSamplesTotal.WithLabelValues(t.queueName).Inc()
+			if _, ok := t.droppedSeries[sample.Ref]; !ok && t.logLimiter.Allow() {
+				level.Info(t.logger).Log("msg", "dropped sample for series that was not explicitly dropped via relabelling", "ref", sample.Ref)
+			}
+			continue
+		}
+		tempSamples = append(tempSamples, enqueuable{
+			ts: prompb.TimeSeries{
+				Labels: t.seriesLabels[sample.Ref],
+				Samples: []prompb.Sample{
+					prompb.Sample{
+						Value:     float64(sample.V),
+						Timestamp: sample.T,
+					},
+				},
+			},
+			ref: sample.Ref,
+		})
+	}
+	t.seriesMtx.Unlock()
+
+outer:
+	for _, sample := range tempSamples {
+		// This will only loop if the queues are being resharded.
+		backoff := t.cfg.MinBackoff
+		for {
+			select {
+			case <-t.quit:
+				return false
+			default:
+			}
+
+			if t.shards.enqueue(sample.ref, sample.ts) {
+				continue outer
+			}
+
+			t.enqueueRetriesMetric.Inc()
+			time.Sleep(time.Duration(backoff))
+			backoff = backoff * 2
+			if backoff > t.cfg.MaxBackoff {
+				backoff = t.cfg.MaxBackoff
+			}
 		}
 	}
-
-	snew.Metric = model.Metric(
-		relabel.Process(model.LabelSet(snew.Metric), t.relabelConfigs...))
-
-	if snew.Metric == nil {
-		return nil
-	}
-
-	t.shardsMtx.RLock()
-	enqueued := t.shards.enqueue(&snew)
-	t.shardsMtx.RUnlock()
-
-	if enqueued {
-		queueLength.WithLabelValues(t.queueName).Inc()
-	} else {
-		droppedSamplesTotal.WithLabelValues(t.queueName).Inc()
-		if t.logLimiter.Allow() {
-			level.Warn(t.logger).Log("msg", "Remote storage queue full, discarding sample. Multiple subsequent messages of this kind may be suppressed.")
-		}
-	}
-	return nil
-}
-
-// NeedsThrottling implements storage.SampleAppender. It will always return
-// false as a remote storage drops samples on the floor if backlogging instead
-// of asking for throttling.
-func (*QueueManager) NeedsThrottling() bool {
-	return false
+	return true
 }
 
 // Start the queue manager sending samples to the remote storage.
 // Does not block.
 func (t *QueueManager) Start() {
+	t.shards.start(t.numShards)
+	t.watcher.Start()
+
 	t.wg.Add(2)
 	go t.updateShardsLoop()
 	go t.reshardLoop()
-
-	t.shardsMtx.Lock()
-	defer t.shardsMtx.Unlock()
-	t.shards.start()
 }
 
 // Stop stops sending samples to the remote storage and waits for pending
 // sends to complete.
 func (t *QueueManager) Stop() {
 	level.Info(t.logger).Log("msg", "Stopping remote storage...")
+	defer level.Info(t.logger).Log("msg", "Remote storage stopped.")
+
 	close(t.quit)
+	t.shards.stop()
+	t.watcher.Stop()
 	t.wg.Wait()
+}
 
-	t.shardsMtx.Lock()
-	defer t.shardsMtx.Unlock()
-	t.shards.stop(t.flushDeadline)
+// StoreSeries keeps track of which series we know about for lookups when sending samples to remote.
+func (t *QueueManager) StoreSeries(series []tsdb.RefSeries, index int) {
+	temp := make(map[uint64][]prompb.Label, len(series))
+	for _, s := range series {
+		ls := make(model.LabelSet, len(s.Labels))
+		for _, label := range s.Labels {
+			ls[model.LabelName(label.Name)] = model.LabelValue(label.Value)
+		}
+		t.processExternalLabels(ls)
+		rl := relabel.Process(ls, t.relabelConfigs...)
+		if len(rl) == 0 {
+			t.droppedSeries[s.Ref] = struct{}{}
+			continue
+		}
+		temp[s.Ref] = labelsetToLabelsProto(rl)
+	}
 
-	level.Info(t.logger).Log("msg", "Remote storage stopped.")
+	t.seriesMtx.Lock()
+	defer t.seriesMtx.Unlock()
+	for ref, labels := range temp {
+		t.seriesLabels[ref] = labels
+		t.seriesSegmentIndexes[ref] = index
+	}
+}
+
+// SeriesReset is used when reading a checkpoint. The WAL Watcher should have
+// stored series records with the checkpoint's index number, so we can now
+// delete any ref IDs with a lower segment index from the two maps.
+func (t *QueueManager) SeriesReset(index int) {
+	t.seriesMtx.Lock()
+	defer t.seriesMtx.Unlock()
+
+	// Check for series that are in segments older than the checkpoint
+	// that were not also present in the checkpoint.
+	for k, v := range t.seriesSegmentIndexes {
+		if v < index {
+			delete(t.seriesLabels, k)
+			delete(t.seriesSegmentIndexes, k)
+		}
+	}
+}
+
+func (t *QueueManager) processExternalLabels(ls model.LabelSet) {
+	for ln, lv := range t.externalLabels {
+		if _, ok := ls[ln]; !ok {
+			ls[ln] = lv
+		}
+	}
 }
 
 func (t *QueueManager) updateShardsLoop() {
@@ -275,6 +417,12 @@ func (t *QueueManager) updateShardsLoop() {
 	for {
 		select {
 		case <-ticker.C:
+			now := time.Now().Unix()
+			threshold := int64(time.Duration(2 * t.cfg.BatchSendDeadline).Seconds())
+			if now-t.lastSendTimestamp > threshold {
+				level.Debug(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold")
+				continue
+			}
 			t.calculateDesiredShards()
 		case <-t.quit:
 			return
@@ -351,107 +499,150 @@ func (t *QueueManager) reshardLoop() {
 	for {
 		select {
 		case numShards := <-t.reshardChan:
-			t.reshard(numShards)
+			// We start the newShards after we have stopped (and therefore completely
+			// flushed) the oldShards, to guarantee we only ever deliver samples in
+			// order.
+			t.shards.stop()
+			t.shards.start(numShards)
 		case <-t.quit:
 			return
 		}
 	}
 }
 
-func (t *QueueManager) reshard(n int) {
-	numShards.WithLabelValues(t.queueName).Set(float64(n))
-
-	t.shardsMtx.Lock()
-	newShards := t.newShards(n)
-	oldShards := t.shards
-	t.shards = newShards
-	t.shardsMtx.Unlock()
-
-	oldShards.stop(t.flushDeadline)
-
-	// We start the newShards after we have stopped (the therefore completely
-	// flushed) the oldShards, to guarantee we only every deliver samples in
-	// order.
-	newShards.start()
-}
-
-type shards struct {
-	qm      *QueueManager
-	queues  []chan *model.Sample
-	done    chan struct{}
-	running int32
-	ctx     context.Context
-	cancel  context.CancelFunc
-}
-
-func (t *QueueManager) newShards(numShards int) *shards {
-	queues := make([]chan *model.Sample, numShards)
-	for i := 0; i < numShards; i++ {
-		queues[i] = make(chan *model.Sample, t.cfg.Capacity)
-	}
-	ctx, cancel := context.WithCancel(context.Background())
+func (t *QueueManager) newShards() *shards {
 	s := &shards{
-		qm:      t,
-		queues:  queues,
-		done:    make(chan struct{}),
-		running: int32(numShards),
-		ctx:     ctx,
-		cancel:  cancel,
+		qm:   t,
+		done: make(chan struct{}),
 	}
 	return s
 }
 
-func (s *shards) start() {
-	for i := 0; i < len(s.queues); i++ {
-		go s.runShard(i)
+// Check and set highestSentTimestamp
+func (t *QueueManager) setHighestSentTimestamp(highest int64) {
+	t.timestampLock.Lock()
+	defer t.timestampLock.Unlock()
+	if highest > t.highestSentTimestamp {
+		t.highestSentTimestamp = highest
+		t.highestSentTimestampMetric.Set(float64(t.highestSentTimestamp))
 	}
 }
 
-func (s *shards) stop(deadline time.Duration) {
-	// Attempt a clean shutdown.
-	for _, shard := range s.queues {
-		close(shard)
+func (t *QueueManager) setLastSendTimestamp(now time.Time) {
+	t.timestampLock.Lock()
+	defer t.timestampLock.Unlock()
+	t.lastSendTimestampMetric.Set(float64(now.UnixNano()) / 1e9)
+	t.lastSendTimestamp = now.Unix()
+}
+
+type shards struct {
+	mtx sync.RWMutex // With the WAL, this is never actually contended.
+
+	qm     *QueueManager
+	queues []chan prompb.TimeSeries
+
+	// Emulate a wait group with a channel and an atomic int, as you
+	// cannot select on a wait group.
+	done    chan struct{}
+	running int32
+
+	// Soft shutdown channel, used to prevent new enqueues and deadlocks.
+	softShutdown chan struct{}
+
+	// Hard shutdown context is used to terminate outgoing HTTP connections
+	// after giving them a chance to finish cleanly.
+	hardShutdown context.CancelFunc
+}
+
+// start the shards; must be called before any call to enqueue.
+func (s *shards) start(n int) {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	newQueues := make([]chan prompb.TimeSeries, n)
+	for i := 0; i < n; i++ {
+		newQueues[i] = make(chan prompb.TimeSeries, s.qm.cfg.Capacity)
+	}
+
+	s.queues = newQueues
+
+	var hardShutdownCtx context.Context
+	hardShutdownCtx, s.hardShutdown = context.WithCancel(context.Background())
+	s.softShutdown = make(chan struct{})
+	s.running = int32(n)
+	s.done = make(chan struct{})
+	for i := 0; i < n; i++ {
+		go s.runShard(hardShutdownCtx, i, newQueues[i])
+	}
+	numShards.WithLabelValues(s.qm.queueName).Set(float64(n))
+}
+
+// stop the shards; subsequent calls to enqueue will return false.
+func (s *shards) stop() {
+	// Attempt a clean shutdown, but only wait flushDeadline for all the shards
+	// to cleanly exit.  As we're doing RPCs, enqueue can block indefinitely.
+	// We must be able to call stop concurrently, hence we can only take the
+	// RLock here.
+	s.mtx.RLock()
+	close(s.softShutdown)
+	s.mtx.RUnlock()
+
+	// Enqueue should now be unblocked, so we can take the write lock.  This
+	// also ensures we don't race with writes to the queues, and get a panic:
+	// send on closed channel.
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	for _, queue := range s.queues {
+		close(queue)
 	}
 	select {
 	case <-s.done:
 		return
-	case <-time.After(deadline):
+	case <-time.After(s.qm.flushDeadline):
 		level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown")
 	}
 
 	// Force an unclean shutdown.
-	s.cancel()
+	s.hardShutdown()
 	<-s.done
 }
 
-func (s *shards) enqueue(sample *model.Sample) bool {
-	s.qm.samplesIn.incr(1)
-
-	fp := sample.Metric.FastFingerprint()
-	shard := uint64(fp) % uint64(len(s.queues))
+// enqueue a sample. If we are currently in the process of shutting down or resharding,
+// it will return false; in this case, the caller should back off and retry.
+func (s *shards) enqueue(ref uint64, sample prompb.TimeSeries) bool {
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
 
 	select {
+	case <-s.softShutdown:
+		return false
+	default:
+	}
+
+	shard := uint64(ref) % uint64(len(s.queues))
+	select {
+	case <-s.softShutdown:
+		return false
 	case s.queues[shard] <- sample:
 		return true
-	default:
-		return false
 	}
 }
 
-func (s *shards) runShard(i int) {
+func (s *shards) runShard(ctx context.Context, i int, queue chan prompb.TimeSeries) {
 	defer func() {
 		if atomic.AddInt32(&s.running, -1) == 0 {
 			close(s.done)
 		}
 	}()
 
-	queue := s.queues[i]
+	shardNum := strconv.Itoa(i)
 
 	// Send batches of at most MaxSamplesPerSend samples to the remote storage.
 	// If we have fewer samples than that, flush them out after a deadline
 	// anyways.
-	pendingSamples := model.Samples{}
+	pendingSamples := []prompb.TimeSeries{}
 
+	max := s.qm.cfg.MaxSamplesPerSend
 	timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
 	stop := func() {
 		if !timer.Stop() {
@@ -465,25 +656,29 @@ func (s *shards) runShard(i int) {
 
 	for {
 		select {
-		case <-s.ctx.Done():
+		case <-ctx.Done():
 			return
 
 		case sample, ok := <-queue:
 			if !ok {
 				if len(pendingSamples) > 0 {
 					level.Debug(s.qm.logger).Log("msg", "Flushing samples to remote storage...", "count", len(pendingSamples))
-					s.sendSamples(pendingSamples)
+					s.sendSamples(ctx, pendingSamples)
 					level.Debug(s.qm.logger).Log("msg", "Done flushing.")
 				}
 				return
 			}
 
-			queueLength.WithLabelValues(s.qm.queueName).Dec()
+			// The number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
+			// retries endlessly; once we accumulate more than 100 pending samples, if we can never reach the
+			// endpoint we'll stop reading from the queue (which has a capacity of 10).
 			pendingSamples = append(pendingSamples, sample)
+			s.qm.pendingSamplesMetric.Inc()
 
-			if len(pendingSamples) >= s.qm.cfg.MaxSamplesPerSend {
-				s.sendSamples(pendingSamples[:s.qm.cfg.MaxSamplesPerSend])
-				pendingSamples = pendingSamples[s.qm.cfg.MaxSamplesPerSend:]
+			if len(pendingSamples) >= max {
+				s.sendSamples(ctx, pendingSamples[:max])
+				pendingSamples = pendingSamples[max:]
+				s.qm.pendingSamplesMetric.Sub(float64(max))
 
 				stop()
 				timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
@@ -491,17 +686,24 @@ func (s *shards) runShard(i int) {
 
 		case <-timer.C:
 			if len(pendingSamples) > 0 {
-				s.sendSamples(pendingSamples)
+				level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending samples", "samples", len(pendingSamples), "shard", shardNum)
+				n := len(pendingSamples)
+				s.sendSamples(ctx, pendingSamples)
 				pendingSamples = pendingSamples[:0]
+				s.qm.pendingSamplesMetric.Sub(float64(n))
 			}
 			timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
 		}
 	}
 }
 
-func (s *shards) sendSamples(samples model.Samples) {
+func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries) {
 	begin := time.Now()
-	s.sendSamplesWithBackoff(samples)
+	err := s.sendSamplesWithBackoff(ctx, samples)
+	if err != nil && s.qm.logLimiter.Allow() {
+		level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", len(samples), "err", err)
+		failedSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
+	}
 
 	// These counters are used to calculate the dynamic sharding, and as such
 	// should be maintained irrespective of success or failure.
@@ -510,30 +712,67 @@ func (s *shards) sendSamples(samples model.Samples) {
 }
 
 // sendSamples to the remote storage with backoff for recoverable errors.
-func (s *shards) sendSamplesWithBackoff(samples model.Samples) {
+func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries) error {
 	backoff := s.qm.cfg.MinBackoff
-	req := ToWriteRequest(samples)
-
-	for retries := s.qm.cfg.MaxRetries; retries > 0; retries-- {
+	req, highest, err := buildWriteRequest(samples)
+	// Failing to build the write request is non-recoverable, since it will
+	// only error if marshaling the proto to bytes fails.
+	if err != nil {
+		return err
+	}
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
 		begin := time.Now()
-		err := s.qm.client.Store(s.ctx, req)
+		err := s.qm.client.Store(ctx, req)
 
 		sentBatchDuration.WithLabelValues(s.qm.queueName).Observe(time.Since(begin).Seconds())
+
 		if err == nil {
 			succeededSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
-			return
+			now := time.Now()
+			s.qm.setLastSendTimestamp(now)
+			s.qm.setHighestSentTimestamp(highest)
+			return nil
 		}
 
-		level.Warn(s.qm.logger).Log("msg", "Error sending samples to remote storage", "count", len(samples), "err", err)
 		if _, ok := err.(recoverableError); !ok {
-			break
+			return err
 		}
+		retriedSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
+
+		if s.qm.logLimiter.Allow() {
+			level.Error(s.qm.logger).Log("err", err)
+		}
+
 		time.Sleep(time.Duration(backoff))
 		backoff = backoff * 2
 		if backoff > s.qm.cfg.MaxBackoff {
 			backoff = s.qm.cfg.MaxBackoff
 		}
 	}
-
-	failedSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
+}
+
+func buildWriteRequest(samples []prompb.TimeSeries) ([]byte, int64, error) {
+	var highest int64
+	for _, ts := range samples {
+		// At the moment we only ever append a TimeSeries with a single sample in it.
+		if ts.Samples[0].Timestamp > highest {
+			highest = ts.Samples[0].Timestamp
+		}
+	}
+	req := &prompb.WriteRequest{
+		Timeseries: samples,
+	}
+
+	data, err := proto.Marshal(req)
+	if err != nil {
+		return nil, highest, err
+	}
+
+	compressed := snappy.Encode(nil, data)
+	return compressed, highest, nil
 }
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index d70c7789aa..789875d6d8 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -22,13 +22,229 @@ import (
 	"testing"
 	"time"
 
+	"github.com/gogo/protobuf/proto"
+	"github.com/golang/snappy"
+	"github.com/stretchr/testify/require"
+
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/prompb"
+	"github.com/prometheus/prometheus/util/testutil"
+	"github.com/prometheus/tsdb"
+	"github.com/prometheus/tsdb/labels"
 )
 
 const defaultFlushDeadline = 1 * time.Minute
 
+func TestSampleDelivery(t *testing.T) {
+	// Let's create an even number of send batches so we don't run into the
+	// batch timeout case.
+	n := config.DefaultQueueConfig.Capacity * 2
+	samples, series := createTimeseries(n)
+
+	c := NewTestStorageClient()
+	c.expectSamples(samples[:len(samples)/2], series)
+
+	cfg := config.DefaultQueueConfig
+	cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
+	cfg.MaxShards = 1
+	var temp int64
+	m := NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), &temp, cfg, nil, nil, c, defaultFlushDeadline, 0)
+	m.seriesLabels = refSeriesToLabelsProto(series)
+
+	// These should be received by the client.
+	m.Start()
+	m.Append(samples[:len(samples)/2])
+	defer m.Stop()
+
+	c.waitForExpectedSamples(t)
+	m.Append(samples[len(samples)/2:])
+	c.expectSamples(samples[len(samples)/2:], series)
+	c.waitForExpectedSamples(t)
+}
+
+func TestSampleDeliveryTimeout(t *testing.T) {
+	// Let's send one fewer sample than the batch size, and wait for the timeout duration.
+	n := 9
+	samples, series := createTimeseries(n)
+	c := NewTestStorageClient()
+
+	cfg := config.DefaultQueueConfig
+	cfg.MaxShards = 1
+	cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
+	var temp int64
+	m := NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), &temp, cfg, nil, nil, c, defaultFlushDeadline, 0)
+	m.seriesLabels = refSeriesToLabelsProto(series)
+	m.Start()
+	defer m.Stop()
+
+	// Send the samples twice, waiting for the samples in the meantime.
+	c.expectSamples(samples, series)
+	m.Append(samples)
+	c.waitForExpectedSamples(t)
+
+	c.expectSamples(samples, series)
+	m.Append(samples)
+	c.waitForExpectedSamples(t)
+}
+
+func TestSampleDeliveryOrder(t *testing.T) {
+	ts := 10
+	n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
+	samples := make([]tsdb.RefSample, 0, n)
+	series := make([]tsdb.RefSeries, 0, n)
+	for i := 0; i < n; i++ {
+		name := fmt.Sprintf("test_metric_%d", i%ts)
+		samples = append(samples, tsdb.RefSample{
+			Ref: uint64(i),
+			T:   int64(i),
+			V:   float64(i),
+		})
+		series = append(series, tsdb.RefSeries{
+			Ref:    uint64(i),
+			Labels: labels.Labels{labels.Label{Name: "__name__", Value: name}},
+		})
+	}
+
+	c := NewTestStorageClient()
+	c.expectSamples(samples, series)
+	var temp int64
+	m := NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), &temp, config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline, 0)
+	m.seriesLabels = refSeriesToLabelsProto(series)
+
+	m.Start()
+	defer m.Stop()
+	// These should be received by the client.
+	m.Append(samples)
+	c.waitForExpectedSamples(t)
+}
+
+func TestShutdown(t *testing.T) {
+	deadline := 5 * time.Second
+	c := NewTestBlockedStorageClient()
+
+	var temp int64
+	m := NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), &temp, config.DefaultQueueConfig, nil, nil, c, deadline, 0)
+	samples, series := createTimeseries(2 * config.DefaultQueueConfig.MaxSamplesPerSend)
+	m.seriesLabels = refSeriesToLabelsProto(series)
+	m.Start()
+
+	// Append blocks to guarantee delivery, so we do it in the background.
+	go func() {
+		m.Append(samples)
+	}()
+	time.Sleep(1 * time.Second)
+
+	// Test to ensure that Stop doesn't block.
+	start := time.Now()
+	m.Stop()
+	// The samples will never be delivered, so duration should
+	// be at least equal to deadline, otherwise the flush deadline
+	// was not respected.
+	duration := time.Since(start)
+	if duration > time.Duration(deadline+(deadline/10)) {
+		t.Errorf("Took too long to shutdown: %s > %s", duration, deadline)
+	}
+	if duration < time.Duration(deadline) {
+		t.Errorf("Shutdown occurred before flush deadline: %s < %s", duration, deadline)
+	}
+}
+
+func TestSeriesReset(t *testing.T) {
+	c := NewTestBlockedStorageClient()
+	deadline := 5 * time.Second
+	var temp int64
+	numSegments := 4
+	numSeries := 25
+
+	m := NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), &temp, config.DefaultQueueConfig, nil, nil, c, deadline, 0)
+	for i := 0; i < numSegments; i++ {
+		series := []tsdb.RefSeries{}
+		for j := 0; j < numSeries; j++ {
+			series = append(series, tsdb.RefSeries{Ref: uint64((i * 100) + j), Labels: labels.Labels{labels.Label{Name: "a", Value: "a"}}})
+		}
+		m.StoreSeries(series, i)
+	}
+	testutil.Equals(t, numSegments*numSeries, len(m.seriesLabels))
+	m.SeriesReset(2)
+	testutil.Equals(t, numSegments*numSeries/2, len(m.seriesLabels))
+}
+
+func TestReshard(t *testing.T) {
+	size := 10 // Make bigger to find more races.
+	n := config.DefaultQueueConfig.Capacity * size
+	samples, series := createTimeseries(n)
+
+	c := NewTestStorageClient()
+	c.expectSamples(samples, series)
+
+	cfg := config.DefaultQueueConfig
+	cfg.MaxShards = 1
+
+	var temp int64
+	m := NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), &temp, cfg, nil, nil, c, defaultFlushDeadline, 0)
+	m.seriesLabels = refSeriesToLabelsProto(series)
+
+	m.Start()
+	defer m.Stop()
+
+	go func() {
+		for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity {
+			sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity])
+			require.True(t, sent)
+			time.Sleep(100 * time.Millisecond)
+		}
+	}()
+
+	for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ {
+		m.shards.stop()
+		m.shards.start(i)
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	c.waitForExpectedSamples(t)
+}
+
+func createTimeseries(n int) ([]tsdb.RefSample, []tsdb.RefSeries) {
+	samples := make([]tsdb.RefSample, 0, n)
+	series := make([]tsdb.RefSeries, 0, n)
+	for i := 0; i < n; i++ {
+		name := fmt.Sprintf("test_metric_%d", i)
+		samples = append(samples, tsdb.RefSample{
+			Ref: uint64(i),
+			T:   int64(i),
+			V:   float64(i),
+		})
+		series = append(series, tsdb.RefSeries{
+			Ref:    uint64(i),
+			Labels: labels.Labels{labels.Label{Name: "__name__", Value: name}},
+		})
+	}
+	return samples, series
+}
+
+func getSeriesNameFromRef(r tsdb.RefSeries) string {
+	for _, l := range r.Labels {
+		if l.Name == "__name__" {
+			return l.Value
+		}
+	}
+	return ""
+}
+
+func refSeriesToLabelsProto(series []tsdb.RefSeries) map[uint64][]prompb.Label {
+	result := make(map[uint64][]prompb.Label)
+	for _, s := range series {
+		for _, l := range s.Labels {
+			result[s.Ref] = append(result[s.Ref], prompb.Label{
+				Name:  l.Name,
+				Value: l.Value,
+			})
+		}
+	}
+	return result
+}
+
 type TestStorageClient struct {
 	receivedSamples map[string][]prompb.Sample
 	expectedSamples map[string][]prompb.Sample
@@ -43,7 +259,7 @@ func NewTestStorageClient() *TestStorageClient {
 	}
 }
 
-func (c *TestStorageClient) expectSamples(ss model.Samples) {
+func (c *TestStorageClient) expectSamples(ss []tsdb.RefSample, series []tsdb.RefSeries) {
 	c.mtx.Lock()
 	defer c.mtx.Unlock()
 
@@ -51,10 +267,10 @@ func (c *TestStorageClient) expectSamples(ss model.Samples) {
 	c.receivedSamples = map[string][]prompb.Sample{}
 
 	for _, s := range ss {
-		ts := labelProtosToLabels(MetricToLabelProtos(s.Metric)).String()
-		c.expectedSamples[ts] = append(c.expectedSamples[ts], prompb.Sample{
-			Timestamp: int64(s.Timestamp),
-			Value:     float64(s.Value),
+		seriesName := getSeriesNameFromRef(series[s.Ref])
+		c.expectedSamples[seriesName] = append(c.expectedSamples[seriesName], prompb.Sample{
+			Timestamp: s.T,
+			Value:     s.V,
 		})
 	}
 	c.wg.Add(len(ss))
@@ -62,7 +278,6 @@ func (c *TestStorageClient) expectSamples(ss model.Samples) {
 
 func (c *TestStorageClient) waitForExpectedSamples(t *testing.T) {
 	c.wg.Wait()
-
 	c.mtx.Lock()
 	defer c.mtx.Unlock()
 	for ts, expectedSamples := range c.expectedSamples {
@@ -72,15 +287,31 @@ func (c *TestStorageClient) waitForExpectedSamples(t *testing.T) {
 	}
 }
 
-func (c *TestStorageClient) Store(_ context.Context, req *prompb.WriteRequest) error {
+func (c *TestStorageClient) Store(_ context.Context, req []byte) error {
 	c.mtx.Lock()
 	defer c.mtx.Unlock()
+	reqBuf, err := snappy.Decode(nil, req)
+	if err != nil {
+		return err
+	}
+
+	var reqProto prompb.WriteRequest
+	if err := proto.Unmarshal(reqBuf, &reqProto); err != nil {
+		return err
+	}
+
 	count := 0
-	for _, ts := range req.Timeseries {
-		labels := labelProtosToLabels(ts.Labels).String()
+	for _, ts := range reqProto.Timeseries {
+		var seriesName string
+		labels := labelProtosToLabels(ts.Labels)
+		for _, label := range labels {
+			if label.Name == "__name__" {
+				seriesName = label.Value
+			}
+		}
 		for _, sample := range ts.Samples {
 			count++
-			c.receivedSamples[labels] = append(c.receivedSamples[labels], sample)
+			c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
 		}
 	}
 	c.wg.Add(-count)
@@ -91,133 +322,21 @@ func (c *TestStorageClient) Name() string {
 	return "teststorageclient"
 }
 
-func TestSampleDelivery(t *testing.T) {
-	// Let's create an even number of send batches so we don't run into the
-	// batch timeout case.
-	n := config.DefaultQueueConfig.Capacity * 2
-
-	samples := make(model.Samples, 0, n)
-	for i := 0; i < n; i++ {
-		name := model.LabelValue(fmt.Sprintf("test_metric_%d", i))
-		samples = append(samples, &model.Sample{
-			Metric: model.Metric{
-				model.MetricNameLabel: name,
-			},
-			Value: model.SampleValue(i),
-		})
-	}
-
-	c := NewTestStorageClient()
-	c.expectSamples(samples[:len(samples)/2])
-
-	cfg := config.DefaultQueueConfig
-	cfg.MaxShards = 1
-	m := NewQueueManager(nil, cfg, nil, nil, c, defaultFlushDeadline)
-
-	// These should be received by the client.
-	for _, s := range samples[:len(samples)/2] {
-		m.Append(s)
-	}
-	// These will be dropped because the queue is full.
-	for _, s := range samples[len(samples)/2:] {
-		m.Append(s)
-	}
-	m.Start()
-	defer m.Stop()
-
-	c.waitForExpectedSamples(t)
-}
-
-func TestSampleDeliveryTimeout(t *testing.T) {
-	// Let's send one less sample than batch size, and wait the timeout duration
-	n := config.DefaultQueueConfig.Capacity - 1
-
-	samples := make(model.Samples, 0, n)
-	for i := 0; i < n; i++ {
-		name := model.LabelValue(fmt.Sprintf("test_metric_%d", i))
-		samples = append(samples, &model.Sample{
-			Metric: model.Metric{
-				model.MetricNameLabel: name,
-			},
-			Value: model.SampleValue(i),
-		})
-	}
-
-	c := NewTestStorageClient()
-
-	cfg := config.DefaultQueueConfig
-	cfg.MaxShards = 1
-	cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
-	m := NewQueueManager(nil, cfg, nil, nil, c, defaultFlushDeadline)
-	m.Start()
-	defer m.Stop()
-
-	// Send the samples twice, waiting for the samples in the meantime.
-	c.expectSamples(samples)
-	for _, s := range samples {
-		m.Append(s)
-	}
-	c.waitForExpectedSamples(t)
-
-	c.expectSamples(samples)
-	for _, s := range samples {
-		m.Append(s)
-	}
-	c.waitForExpectedSamples(t)
-}
-
-func TestSampleDeliveryOrder(t *testing.T) {
-	ts := 10
-	n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
-
-	samples := make(model.Samples, 0, n)
-	for i := 0; i < n; i++ {
-		name := model.LabelValue(fmt.Sprintf("test_metric_%d", i%ts))
-		samples = append(samples, &model.Sample{
-			Metric: model.Metric{
-				model.MetricNameLabel: name,
-			},
-			Value:     model.SampleValue(i),
-			Timestamp: model.Time(i),
-		})
-	}
-
-	c := NewTestStorageClient()
-	c.expectSamples(samples)
-	m := NewQueueManager(nil, config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline)
-
-	// These should be received by the client.
-	for _, s := range samples {
-		m.Append(s)
-	}
-	m.Start()
-	defer m.Stop()
-
-	c.waitForExpectedSamples(t)
-}
-
 // TestBlockingStorageClient is a queue_manager StorageClient which will block
-// on any calls to Store(), until the `block` channel is closed, at which point
-// the `numCalls` property will contain a count of how many times Store() was
-// called.
+// on any calls to Store(), until the request's Context is cancelled, at which
+// point the `numCalls` property will contain a count of how many times Store()
+// was called.
 type TestBlockingStorageClient struct {
 	numCalls uint64
-	block    chan bool
 }
 
 func NewTestBlockedStorageClient() *TestBlockingStorageClient {
-	return &TestBlockingStorageClient{
-		block:    make(chan bool),
-		numCalls: 0,
-	}
+	return &TestBlockingStorageClient{}
 }
 
-func (c *TestBlockingStorageClient) Store(ctx context.Context, _ *prompb.WriteRequest) error {
+func (c *TestBlockingStorageClient) Store(ctx context.Context, _ []byte) error {
 	atomic.AddUint64(&c.numCalls, 1)
-	select {
-	case <-c.block:
-	case <-ctx.Done():
-	}
+	<-ctx.Done()
 	return nil
 }
 
@@ -225,106 +344,6 @@ func (c *TestBlockingStorageClient) NumCalls() uint64 {
 	return atomic.LoadUint64(&c.numCalls)
 }
 
-func (c *TestBlockingStorageClient) unlock() {
-	close(c.block)
-}
-
 func (c *TestBlockingStorageClient) Name() string {
 	return "testblockingstorageclient"
 }
-
-func (t *QueueManager) queueLen() int {
-	t.shardsMtx.Lock()
-	defer t.shardsMtx.Unlock()
-	queueLength := 0
-	for _, shard := range t.shards.queues {
-		queueLength += len(shard)
-	}
-	return queueLength
-}
-
-func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
-	// Our goal is to fully empty the queue:
-	// `MaxSamplesPerSend*Shards` samples should be consumed by the
-	// per-shard goroutines, and then another `MaxSamplesPerSend`
-	// should be left on the queue.
-	n := config.DefaultQueueConfig.MaxSamplesPerSend * 2
-
-	samples := make(model.Samples, 0, n)
-	for i := 0; i < n; i++ {
-		name := model.LabelValue(fmt.Sprintf("test_metric_%d", i))
-		samples = append(samples, &model.Sample{
-			Metric: model.Metric{
-				model.MetricNameLabel: name,
-			},
-			Value: model.SampleValue(i),
-		})
-	}
-
-	c := NewTestBlockedStorageClient()
-	cfg := config.DefaultQueueConfig
-	cfg.MaxShards = 1
-	cfg.Capacity = n
-	m := NewQueueManager(nil, cfg, nil, nil, c, defaultFlushDeadline)
-
-	m.Start()
-
-	defer func() {
-		c.unlock()
-		m.Stop()
-	}()
-
-	for _, s := range samples {
-		m.Append(s)
-	}
-
-	// Wait until the runShard() loops drain the queue.  If things went right, it
-	// should then immediately block in sendSamples(), but, in case of error,
-	// it would spawn too many goroutines, and thus we'd see more calls to
-	// client.Store()
-	//
-	// The timed wait is maybe non-ideal, but, in order to verify that we're
-	// not spawning too many concurrent goroutines, we have to wait on the
-	// Run() loop to consume a specific number of elements from the
-	// queue... and it doesn't signal that in any obvious way, except by
-	// draining the queue.  We cap the waiting at 1 second -- that should give
-	// plenty of time, and keeps the failure fairly quick if we're not draining
-	// the queue properly.
-	for i := 0; i < 100 && m.queueLen() > 0; i++ {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	if m.queueLen() != config.DefaultQueueConfig.MaxSamplesPerSend {
-		t.Fatalf("Failed to drain QueueManager queue, %d elements left",
-			m.queueLen(),
-		)
-	}
-
-	numCalls := c.NumCalls()
-	if numCalls != uint64(1) {
-		t.Errorf("Saw %d concurrent sends, expected 1", numCalls)
-	}
-}
-
-func TestShutdown(t *testing.T) {
-	deadline := 10 * time.Second
-	c := NewTestBlockedStorageClient()
-	m := NewQueueManager(nil, config.DefaultQueueConfig, nil, nil, c, deadline)
-	for i := 0; i < config.DefaultQueueConfig.MaxSamplesPerSend; i++ {
-		m.Append(&model.Sample{
-			Metric: model.Metric{
-				model.MetricNameLabel: model.LabelValue(fmt.Sprintf("test_metric_%d", i)),
-			},
-			Value:     model.SampleValue(i),
-			Timestamp: model.Time(i),
-		})
-	}
-	m.Start()
-
-	start := time.Now()
-	m.Stop()
-	duration := time.Since(start)
-	if duration > deadline+(deadline/10) {
-		t.Errorf("Took too long to shutdown: %s > %s", duration, deadline)
-	}
-}
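
The TestStorageClient.Store changes above reflect the new client contract: Store now receives raw bytes, namely a snappy-compressed, protobuf-encoded prompb.WriteRequest. A minimal sketch of producing such a payload (buildWriteRequest is illustrative, not part of this change):

	import (
		"github.com/gogo/protobuf/proto"
		"github.com/golang/snappy"
		"github.com/prometheus/prometheus/prompb"
	)

	// buildWriteRequest produces the wire format that Store(ctx, []byte)
	// is expected to receive.
	func buildWriteRequest(ts []prompb.TimeSeries) ([]byte, error) {
		req := &prompb.WriteRequest{Timeseries: ts}
		data, err := proto.Marshal(req)
		if err != nil {
			return nil, err
		}
		// Snappy block format, the inverse of the snappy.Decode call in
		// TestStorageClient.Store.
		return snappy.Encode(nil, data), nil
	}
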
diff --git a/storage/remote/storage.go b/storage/remote/storage.go
index 776edc07f0..428773c053 100644
--- a/storage/remote/storage.go
+++ b/storage/remote/storage.go
@@ -19,9 +19,12 @@ import (
 	"time"
 
 	"github.com/go-kit/kit/log"
+
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/timestamp"
 	"github.com/prometheus/prometheus/storage"
 )
 
@@ -35,7 +38,13 @@ type Storage struct {
 	mtx    sync.RWMutex
 
 	// For writes
-	queues []*QueueManager
+	walDir                 string
+	queues                 []*QueueManager
+	samplesIn              *ewmaRate
+	samplesInMetric        prometheus.Counter
+	highestTimestampMtx    sync.Mutex
+	highestTimestamp       int64
+	highestTimestampMetric prometheus.Gauge
 
 	// For reads
 	queryables             []storage.Queryable
@@ -44,15 +53,30 @@ type Storage struct {
 }
 
 // NewStorage returns a remote.Storage.
-func NewStorage(l log.Logger, stCallback startTimeCallback, flushDeadline time.Duration) *Storage {
+func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration) *Storage {
 	if l == nil {
 		l = log.NewNopLogger()
 	}
-	return &Storage{
+	shardUpdateDuration := 10 * time.Second
+	s := &Storage{
 		logger:                 l,
 		localStartTimeCallback: stCallback,
 		flushDeadline:          flushDeadline,
+		walDir:                 walDir,
+		samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),
+		samplesInMetric: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_remote_storage_samples_in_total",
+			Help: "Samples in to remote storage, compare to samples out for queue managers.",
+		}),
+		highestTimestampMetric: prometheus.NewGauge(prometheus.GaugeOpts{
+			Name: "prometheus_remote_storage_highest_timestamp_in",
+			Help: "Highest timestamp that has come into the remote storage via the Appender interface.",
+		}),
 	}
+	reg.MustRegister(s.samplesInMetric)
+	reg.MustRegister(s.highestTimestampMetric)
+	return s
 }
 
 // ApplyConfig updates the state as the new config requires.
@@ -61,7 +85,6 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
 	defer s.mtx.Unlock()
 
 	// Update write queues
-
 	newQueues := []*QueueManager{}
 	// TODO: we should only stop & recreate queues which have changes,
 	// as this can be quite disruptive.
@@ -74,13 +97,20 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
 		if err != nil {
 			return err
 		}
+		// Convert to int64 for comparison with timestamps from samples
+		// we will eventually read from the WAL on startup.
+		startTime := timestamp.FromTime(time.Now())
 		newQueues = append(newQueues, NewQueueManager(
 			s.logger,
+			s.walDir,
+			s.samplesIn,
+			&s.highestTimestamp,
 			rwConf.QueueConfig,
 			conf.GlobalConfig.ExternalLabels,
 			rwConf.WriteRelabelConfigs,
 			c,
 			s.flushDeadline,
+			startTime,
 		))
 	}
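
NewStorage now takes a prometheus.Registerer and the WAL directory alongside the old arguments. A minimal construction sketch, assuming a throwaway registry and an illustrative path:

	reg := prometheus.NewRegistry()
	s := NewStorage(log.NewNopLogger(), reg, func() (int64, error) {
		return 0, nil
	}, "/tmp/prom-data", 1*time.Minute)

Using a fresh registry instead of prometheus.DefaultRegisterer avoids duplicate-registration panics when multiple Storage instances are constructed, e.g. across tests.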
 
diff --git a/storage/remote/wal_watcher.go b/storage/remote/wal_watcher.go
new file mode 100644
index 0000000000..b3e93dcb46
--- /dev/null
+++ b/storage/remote/wal_watcher.go
@@ -0,0 +1,521 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/tsdb"
+	"github.com/prometheus/tsdb/fileutil"
+	"github.com/prometheus/tsdb/wal"
+)
+
+const (
+	readPeriod         = 10 * time.Millisecond
+	checkpointPeriod   = 5 * time.Second
+	segmentCheckPeriod = 100 * time.Millisecond
+)
+
+var (
+	watcherSamplesRecordsRead = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "prometheus",
+			Subsystem: "wal_watcher",
+			Name:      "samples_records_read_total",
+			Help:      "Number of samples records read by the WAL watcher from the WAL.",
+		},
+		[]string{queue},
+	)
+	watcherSeriesRecordsRead = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "prometheus",
+			Subsystem: "wal_watcher",
+			Name:      "series_records_read_total",
+			Help:      "Number of series records read by the WAL watcher from the WAL.",
+		},
+		[]string{queue},
+	)
+	watcherTombstoneRecordsRead = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "prometheus",
+			Subsystem: "wal_watcher",
+			Name:      "tombstone_records_read_total",
+			Help:      "Number of tombstone records read by the WAL watcher from the WAL.",
+		},
+		[]string{queue},
+	)
+	watcherInvalidRecordsRead = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "prometheus",
+			Subsystem: "wal_watcher",
+			Name:      "invalid_records_read_total",
+			Help:      "Number of invalid records read by the WAL watcher from the WAL.",
+		},
+		[]string{queue},
+	)
+	watcherUnknownTypeRecordsRead = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "prometheus",
+			Subsystem: "wal_watcher",
+			Name:      "unknown_records_read_total",
+			Help:      "Number of records read by the WAL watcher from the WAL of an unknown record type.",
+		},
+		[]string{queue},
+	)
+	watcherRecordDecodeFails = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "prometheus",
+			Subsystem: "wal_watcher",
+			Name:      "record_decode_failures_total",
+			Help:      "Number of records read by the WAL watcher that resulted in an error when decoding.",
+		},
+		[]string{queue},
+	)
+	watcherSamplesSentPreTailing = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "prometheus",
+			Subsystem: "wal_watcher",
+			Name:      "samples_sent_pre_tailing_total",
+			Help:      "Number of sample records read by the WAL watcher and sent to remote write during replay of existing WAL.",
+		},
+		[]string{queue},
+	)
+	watcherCurrentSegment = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: "prometheus",
+			Subsystem: "wal_watcher",
+			Name:      "current_segment",
+			Help:      "Current segment the WAL watcher is reading records from.",
+		},
+		[]string{queue},
+	)
+)
+
+func init() {
+	prometheus.MustRegister(watcherSamplesRecordsRead)
+	prometheus.MustRegister(watcherSeriesRecordsRead)
+	prometheus.MustRegister(watcherTombstoneRecordsRead)
+	prometheus.MustRegister(watcherInvalidRecordsRead)
+	prometheus.MustRegister(watcherUnknownTypeRecordsRead)
+	prometheus.MustRegister(watcherRecordDecodeFails)
+	prometheus.MustRegister(watcherSamplesSentPreTailing)
+	prometheus.MustRegister(watcherCurrentSegment)
+}
+
+type writeTo interface {
+	Append([]tsdb.RefSample) bool
+	StoreSeries([]tsdb.RefSeries, int)
+	SeriesReset(int)
+}
+
+// WALWatcher watches the TSDB WAL on behalf of a writeTo implementation.
+type WALWatcher struct {
+	name   string
+	writer writeTo
+	logger log.Logger
+	walDir string
+
+	currentSegment int
+	lastCheckpoint string
+	startTime      int64
+
+	samplesReadMetric       prometheus.Counter
+	seriesReadMetric        prometheus.Counter
+	tombstonesReadMetric    prometheus.Counter
+	invalidReadMetric       prometheus.Counter
+	unknownReadMetric       prometheus.Counter
+	recordDecodeFailsMetric prometheus.Counter
+	samplesSentPreTailing   prometheus.Counter
+	currentSegmentMetric    prometheus.Gauge
+
+	quit chan struct{}
+}
+
+// NewWALWatcher creates a new WAL watcher for a given writeTo.
+func NewWALWatcher(logger log.Logger, name string, writer writeTo, walDir string, startTime int64) *WALWatcher {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	w := &WALWatcher{
+		logger:    logger,
+		writer:    writer,
+		walDir:    path.Join(walDir, "wal"),
+		startTime: startTime,
+		name:      name,
+		quit:      make(chan struct{}),
+	}
+
+	w.samplesReadMetric = watcherSamplesRecordsRead.WithLabelValues(w.name)
+	w.seriesReadMetric = watcherSeriesRecordsRead.WithLabelValues(w.name)
+	w.tombstonesReadMetric = watcherTombstoneRecordsRead.WithLabelValues(w.name)
+	w.unknownReadMetric = watcherUnknownTypeRecordsRead.WithLabelValues(w.name)
+	w.invalidReadMetric = watcherInvalidRecordsRead.WithLabelValues(w.name)
+	w.recordDecodeFailsMetric = watcherRecordDecodeFails.WithLabelValues(w.name)
+	w.samplesSentPreTailing = watcherSamplesSentPreTailing.WithLabelValues(w.name)
+	w.currentSegmentMetric = watcherCurrentSegment.WithLabelValues(w.name)
+
+	return w
+}
+
+func (w *WALWatcher) Start() {
+	level.Info(w.logger).Log("msg", "starting WAL watcher", "queue", w.name)
+	go w.runWatcher()
+}
+
+func (w *WALWatcher) Stop() {
+	level.Info(w.logger).Log("msg", "stopping WAL watcher", "queue", w.name)
+	close(w.quit)
+}
+
+func (w *WALWatcher) runWatcher() {
+	// The WAL dir may not exist when Prometheus first starts up.
+	for {
+		if _, err := os.Stat(w.walDir); os.IsNotExist(err) {
+			time.Sleep(time.Second)
+		} else {
+			break
+		}
+	}
+
+	nw, err := wal.New(nil, nil, w.walDir)
+	if err != nil {
+		level.Error(w.logger).Log("err", err)
+		return
+	}
+
+	first, last, err := nw.Segments()
+	if err != nil {
+		level.Error(w.logger).Log("err", err)
+		return
+	}
+
+	if last == -1 {
+		level.Error(w.logger).Log("msg", "no WAL segments found")
+		return
+	}
+
+	// Backfill from the checkpoint first if it exists.
+	dir, _, err := tsdb.LastCheckpoint(w.walDir)
+	if err != nil && err != tsdb.ErrNotFound {
+		level.Error(w.logger).Log("msg", "error looking for existing checkpoint, some samples may be dropped", "err", errors.Wrap(err, "find last checkpoint"))
+	}
+
+	if err == nil {
+		level.Debug(w.logger).Log("msg", "reading checkpoint", "dir", dir)
+		w.lastCheckpoint = dir
+		err = w.readCheckpoint(dir)
+		if err != nil {
+			level.Error(w.logger).Log("msg", "error reading existing checkpoint, some samples may be dropped", "err", err)
+		}
+	}
+
+	w.currentSegment = first
+	w.currentSegmentMetric.Set(float64(w.currentSegment))
+	segment, err := wal.OpenReadSegment(wal.SegmentName(w.walDir, w.currentSegment))
+	// TODO: callum, is this error really fatal?
+	if err != nil {
+		level.Error(w.logger).Log("err", err)
+		return
+	}
+	reader := wal.NewLiveReader(segment)
+	tail := false
+
+	for {
+		// If we've replayed the existing WAL, start tailing.
+		if w.currentSegment == last {
+			tail = true
+		}
+		if tail {
+			level.Info(w.logger).Log("msg", "watching segment", "segment", w.currentSegment)
+		} else {
+			level.Info(w.logger).Log("msg", "replaying segment", "segment", w.currentSegment)
+		}
+
+		// On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
+		// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
+		err := w.watch(nw, reader, tail)
+		segment.Close()
+		if err != nil {
+			level.Error(w.logger).Log("msg", "runWatcher is ending", "err", err)
+			return
+		}
+
+		w.currentSegment++
+		w.currentSegmentMetric.Set(float64(w.currentSegment))
+
+		segment, err = wal.OpenReadSegment(wal.SegmentName(w.walDir, w.currentSegment))
+		// TODO: callum, is this error really fatal?
+		if err != nil {
+			level.Error(w.logger).Log("err", err)
+			return
+		}
+		reader = wal.NewLiveReader(segment)
+	}
+}
+
+// watch takes tail=true to indicate that the reader is currently on a segment
+// that is actively being written to. If false, assume it's a full segment and
+// we're replaying it on start to cache the series records.
+func (w *WALWatcher) watch(wl *wal.WAL, reader *wal.LiveReader, tail bool) error {
+	readTicker := time.NewTicker(readPeriod)
+	defer readTicker.Stop()
+
+	checkpointTicker := time.NewTicker(checkpointPeriod)
+	defer checkpointTicker.Stop()
+
+	segmentTicker := time.NewTicker(segmentCheckPeriod)
+	defer segmentTicker.Stop()
+	// If we're replaying the segment we need to know the size of the file to know
+	// when to return from watch and move on to the next segment.
+	size := int64(math.MaxInt64)
+	if !tail {
+		segmentTicker.Stop()
+		checkpointTicker.Stop()
+		var err error
+		size, err = getSegmentSize(w.walDir, w.currentSegment)
+		if err != nil {
+			level.Error(w.logger).Log("msg", "error getting segment size", "segment", w.currentSegment)
+			return errors.Wrap(err, "get segment size")
+		}
+	}
+
+	for {
+		select {
+		case <-w.quit:
+			level.Info(w.logger).Log("msg", "quitting WAL watcher watch loop")
+			return errors.New("quit channel")
+
+		case <-checkpointTicker.C:
+			// Periodically check if there is a new checkpoint.
+			// As this is considered an optimisation, we ignore errors during
+			// checkpoint processing.
+
+			dir, _, err := tsdb.LastCheckpoint(w.walDir)
+			if err != nil && err != tsdb.ErrNotFound {
+				level.Error(w.logger).Log("msg", "error getting last checkpoint", "err", err)
+				continue
+			}
+
+			if dir == w.lastCheckpoint {
+				continue
+			}
+
+			level.Info(w.logger).Log("msg", "new checkpoint detected", "last", w.lastCheckpoint, "new", dir)
+
+			d, err := checkpointNum(dir)
+			if err != nil {
+				level.Error(w.logger).Log("msg", "error parsing checkpoint", "err", err)
+				continue
+			}
+
+			if d >= w.currentSegment {
+				level.Info(w.logger).Log("msg", "current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", w.currentSegment), "checkpoint", dir)
+				continue
+			}
+
+			w.lastCheckpoint = dir
+			// This potentially takes a long time; should we run it in another goroutine?
+			err = w.readCheckpoint(w.lastCheckpoint)
+			if err != nil {
+				level.Error(w.logger).Log("err", err)
+			}
+			// Clear series with a checkpoint or segment index # lower than the checkpoint we just read.
+			w.writer.SeriesReset(d)
+
+		case <-segmentTicker.C:
+			_, last, err := wl.Segments()
+			if err != nil {
+				return errors.Wrap(err, "segments")
+			}
+
+			// Check if a new segment exists.
+			if last <= w.currentSegment {
+				continue
+			}
+
+			if err := w.readSegment(reader); err != nil {
+				// Ignore errors reading to the end of the segment; we're about to
+				// move on to the next segment anyway.
+				level.Error(w.logger).Log("msg", "error reading to end of segment", "err", err)
+			}
+
+			level.Info(w.logger).Log("msg", "a new segment exists, we should start reading it", "current", fmt.Sprintf("%08d", w.currentSegment), "new", fmt.Sprintf("%08d", last))
+			return nil
+
+		case <-readTicker.C:
+			if err := w.readSegment(reader); err != nil && err != io.EOF {
+				level.Error(w.logger).Log("err", err)
+				return err
+			}
+			if reader.TotalRead() >= size && !tail {
+				level.Info(w.logger).Log("msg", "done replaying segment", "segment", w.currentSegment, "size", size, "read", reader.TotalRead())
+				return nil
+			}
+		}
+	}
+}
+
+func (w *WALWatcher) readSegment(r *wal.LiveReader) error {
+	for r.Next() && !isClosed(w.quit) {
+		err := w.decodeRecord(r.Record())
+
+		// Intentionally skip over record decode errors.
+		if err != nil {
+			level.Error(w.logger).Log("err", err)
+		}
+	}
+	return r.Err()
+}
+
+func (w *WALWatcher) decodeRecord(rec []byte) error {
+	var (
+		dec     tsdb.RecordDecoder
+		series  []tsdb.RefSeries
+		samples []tsdb.RefSample
+	)
+	switch dec.Type(rec) {
+	case tsdb.RecordSeries:
+		series, err := dec.Series(rec, series[:0])
+		if err != nil {
+			w.recordDecodeFailsMetric.Inc()
+			return err
+		}
+		w.seriesReadMetric.Add(float64(len(series)))
+		w.writer.StoreSeries(series, w.currentSegment)
+
+	case tsdb.RecordSamples:
+		samples, err := dec.Samples(rec, samples[:0])
+		if err != nil {
+			w.recordDecodeFailsMetric.Inc()
+			return err
+		}
+		var send []tsdb.RefSample
+		for _, s := range samples {
+			if s.T > w.startTime {
+				send = append(send, s)
+			}
+		}
+		if len(send) > 0 {
+			// Only count samples sent after the start timestamp, so that samples
+			// in can be compared against samples read and successfully sent.
+			w.samplesReadMetric.Add(float64(len(send)))
+			// Append blocks until the samples are sent to all remote write
+			// endpoints, or until shutdown (because enqueue blocks).
+			w.writer.Append(send)
+		}
+
+	case tsdb.RecordTombstones:
+		// decodeRecord handles one record per call, so count records directly.
+		w.tombstonesReadMetric.Inc()
+
+	case tsdb.RecordInvalid:
+		w.invalidReadMetric.Inc()
+		return errors.New("invalid record")
+
+	default:
+		w.recordDecodeFailsMetric.Inc()
+		return errors.New("unknown TSDB record type")
+	}
+	return nil
+}
+
+// Read all the series records from a Checkpoint directory.
+func (w *WALWatcher) readCheckpoint(checkpointDir string) error {
+	level.Info(w.logger).Log("msg", "reading checkpoint", "dir", checkpointDir)
+	sr, err := wal.NewSegmentsReader(checkpointDir)
+	if err != nil {
+		return errors.Wrap(err, "open checkpoint")
+	}
+	defer sr.Close()
+
+	size, err := getCheckpointSize(checkpointDir)
+	if err != nil {
+		level.Error(w.logger).Log("msg", "error getting checkpoint size", "checkpoint", checkpointDir)
+		return errors.Wrap(err, "get checkpoint size")
+	}
+
+	r := wal.NewLiveReader(sr)
+	w.readSegment(r)
+	if r.TotalRead() != size {
+		level.Warn(w.logger).Log("msg", "may not have read all data from checkpoint")
+	}
+	level.Debug(w.logger).Log("msg", "read series references from checkpoint", "checkpoint", checkpointDir)
+	return nil
+}
+
+func checkpointNum(dir string) (int, error) {
+	// Checkpoint dir names are in the format checkpoint.000001
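+	// For example, checkpointNum("checkpoint.000123") returns 123.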
+	chunks := strings.Split(dir, ".")
+	if len(chunks) != 2 {
+		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
+	}
+
+	result, err := strconv.Atoi(chunks[1])
+	if err != nil {
+		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
+	}
+
+	return result, nil
+}
+
+func getCheckpointSize(dir string) (int64, error) {
+	i := int64(0)
+	segs, err := fileutil.ReadDir(dir)
+	if err != nil {
+		return 0, err
+	}
+	for _, fn := range segs {
+		num, err := strconv.Atoi(fn)
+		if err != nil {
+			return i, err
+		}
+		sz, err := getSegmentSize(dir, num)
+		if err != nil {
+			return i, err
+		}
+		i += sz
+	}
+	return i, nil
+}
+
+// Get size of segment.
+func getSegmentSize(dir string, index int) (int64, error) {
+	i := int64(-1)
+	fi, err := os.Stat(wal.SegmentName(dir, index))
+	if err == nil {
+		i = fi.Size()
+	}
+	return i, err
+}
+
+func isClosed(c chan struct{}) bool {
+	select {
+	case <-c:
+		return true
+	default:
+		return false
+	}
+}
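
The watcher drives anything implementing the writeTo interface defined above. A minimal wiring sketch in the same package (nopWriter, the logger, and the data directory are illustrative):

	type nopWriter struct{}

	func (nopWriter) Append([]tsdb.RefSample) bool      { return true }
	func (nopWriter) StoreSeries([]tsdb.RefSeries, int) {}
	func (nopWriter) SeriesReset(int)                   {}

	func exampleWatcher(logger log.Logger) {
		// NewWALWatcher joins "wal" onto the directory itself, so pass the
		// TSDB data dir, not the wal subdirectory.
		w := NewWALWatcher(logger, "example", nopWriter{}, "/prometheus/data",
			timestamp.FromTime(time.Now()))
		w.Start()
		// ... run until shutdown, then:
		w.Stop()
	}
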
diff --git a/storage/remote/wal_watcher_test.go b/storage/remote/wal_watcher_test.go
new file mode 100644
index 0000000000..9a68a5e3e3
--- /dev/null
+++ b/storage/remote/wal_watcher_test.go
@@ -0,0 +1,417 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/prometheus/prometheus/util/testutil"
+	"github.com/prometheus/tsdb"
+	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/tsdb/wal"
+)
+
+var defaultRetryInterval = 100 * time.Millisecond
+var defaultRetries = 100
+
+// retry calls f up to n times, waiting interval between attempts, until f
+// returns true.
+func retry(t *testing.T, interval time.Duration, n int, f func() bool) {
+	t.Helper()
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for i := 0; i <= n; i++ {
+		if f() {
+			return
+		}
+		t.Logf("retry %d/%d", i, n)
+		<-ticker.C
+	}
+	t.Logf("function returned false")
+}
+
+type writeToMock struct {
+	samplesAppended      int
+	seriesLock           sync.Mutex
+	seriesSegmentIndexes map[uint64]int
+}
+
+func (wtm *writeToMock) Append(s []tsdb.RefSample) bool {
+	wtm.samplesAppended += len(s)
+	return true
+}
+
+func (wtm *writeToMock) StoreSeries(series []tsdb.RefSeries, index int) {
+	wtm.seriesLock.Lock()
+	defer wtm.seriesLock.Unlock()
+	for _, s := range series {
+		wtm.seriesSegmentIndexes[s.Ref] = index
+	}
+}
+
+func (wtm *writeToMock) SeriesReset(index int) {
+	// Drop series recorded in segments older than the checkpoint that were
+	// not also re-recorded in the checkpoint itself.
+	wtm.seriesLock.Lock()
+	defer wtm.seriesLock.Unlock()
+	for k, v := range wtm.seriesSegmentIndexes {
+		if v < index {
+			delete(wtm.seriesSegmentIndexes, k)
+		}
+	}
+}
+
+func (wtm *writeToMock) checkNumLabels() int {
+	wtm.seriesLock.Lock()
+	defer wtm.seriesLock.Unlock()
+	return len(wtm.seriesSegmentIndexes)
+}
+
+func newWriteToMock() *writeToMock {
+	return &writeToMock{
+		seriesSegmentIndexes: make(map[uint64]int),
+	}
+}
+
+func Test_readToEnd_noCheckpoint(t *testing.T) {
+	pageSize := 32 * 1024
+	const seriesCount = 10
+	const samplesCount = 250
+
+	dir, err := ioutil.TempDir("", "readToEnd_noCheckpoint")
+	testutil.Ok(t, err)
+	defer os.RemoveAll(dir)
+	wdir := path.Join(dir, "wal")
+	err = os.Mkdir(wdir, 0777)
+	testutil.Ok(t, err)
+
+	w, err := wal.NewSize(nil, nil, wdir, 128*pageSize)
+	testutil.Ok(t, err)
+
+	var recs [][]byte
+
+	enc := tsdb.RecordEncoder{}
+
+	for i := 0; i < seriesCount; i++ {
+		series := enc.Series([]tsdb.RefSeries{
+			tsdb.RefSeries{
+				Ref:    uint64(i),
+				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
+			},
+		}, nil)
+		recs = append(recs, series)
+		for j := 0; j < samplesCount; j++ {
+			sample := enc.Samples([]tsdb.RefSample{
+				tsdb.RefSample{
+					Ref: uint64(j),
+					T:   int64(i),
+					V:   float64(i),
+				},
+			}, nil)
+
+			recs = append(recs, sample)
+
+			// Randomly batch up records.
+			if rand.Intn(4) < 3 {
+				testutil.Ok(t, w.Log(recs...))
+				recs = recs[:0]
+			}
+		}
+	}
+	testutil.Ok(t, w.Log(recs...))
+
+	_, _, err = w.Segments()
+	testutil.Ok(t, err)
+
+	wt := newWriteToMock()
+	st := timestamp.FromTime(time.Now())
+	watcher := NewWALWatcher(nil, "", wt, dir, st)
+	go watcher.Start()
+
+	expected := seriesCount
+	retry(t, defaultRetryInterval, defaultRetries, func() bool {
+		return wt.checkNumLabels() >= expected
+	})
+	watcher.Stop()
+	testutil.Equals(t, expected, wt.checkNumLabels())
+}
+
+func Test_readToEnd_withCheckpoint(t *testing.T) {
+	pageSize := 32 * 1024
+	const seriesCount = 10
+	const samplesCount = 250
+
+	dir, err := ioutil.TempDir("", "readToEnd_withCheckpoint")
+	testutil.Ok(t, err)
+	defer os.RemoveAll(dir)
+
+	wdir := path.Join(dir, "wal")
+	err = os.Mkdir(wdir, 0777)
+	testutil.Ok(t, err)
+
+	os.Create(wal.SegmentName(wdir, 30))
+
+	enc := tsdb.RecordEncoder{}
+	w, err := wal.NewSize(nil, nil, wdir, 128*pageSize)
+	testutil.Ok(t, err)
+
+	// Write to the initial segment then checkpoint.
+	for i := 0; i < seriesCount*10; i++ {
+		ref := i + 100
+		series := enc.Series([]tsdb.RefSeries{
+			tsdb.RefSeries{
+				Ref:    uint64(ref),
+				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
+			},
+		}, nil)
+		testutil.Ok(t, w.Log(series))
+
+		for j := 0; j < samplesCount*10; j++ {
+			inner := rand.Intn(ref + 1)
+			sample := enc.Samples([]tsdb.RefSample{
+				tsdb.RefSample{
+					Ref: uint64(inner),
+					T:   int64(i),
+					V:   float64(i),
+				},
+			}, nil)
+			testutil.Ok(t, w.Log(sample))
+		}
+	}
+	tsdb.Checkpoint(w, 30, 31, func(x uint64) bool { return true }, 0)
+	w.Truncate(32)
+
+	// Write more records after checkpointing.
+	for i := 0; i < seriesCount*10; i++ {
+		series := enc.Series([]tsdb.RefSeries{
+			tsdb.RefSeries{
+				Ref:    uint64(i),
+				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
+			},
+		}, nil)
+		testutil.Ok(t, w.Log(series))
+
+		for j := 0; j < samplesCount*10; j++ {
+			sample := enc.Samples([]tsdb.RefSample{
+				tsdb.RefSample{
+					Ref: uint64(j),
+					T:   int64(i),
+					V:   float64(i),
+				},
+			}, nil)
+			testutil.Ok(t, w.Log(sample))
+		}
+	}
+
+	_, _, err = w.Segments()
+	testutil.Ok(t, err)
+	wt := newWriteToMock()
+	st := timestamp.FromTime(time.Now())
+	watcher := NewWALWatcher(nil, "", wt, dir, st)
+	go watcher.Start()
+
+	expected := seriesCount * 10 * 2
+	retry(t, defaultRetryInterval, defaultRetries, func() bool {
+		return wt.checkNumLabels() >= expected
+	})
+	watcher.Stop()
+	testutil.Equals(t, expected, wt.checkNumLabels())
+}
+
+func Test_readCheckpoint(t *testing.T) {
+	pageSize := 32 * 1024
+	const seriesCount = 10
+	const samplesCount = 250
+
+	dir, err := ioutil.TempDir("", "readCheckpoint")
+	testutil.Ok(t, err)
+	defer os.RemoveAll(dir)
+
+	wdir := path.Join(dir, "wal")
+	err = os.Mkdir(wdir, 0777)
+	testutil.Ok(t, err)
+
+	os.Create(wal.SegmentName(wdir, 30))
+
+	enc := tsdb.RecordEncoder{}
+	w, err := wal.NewSize(nil, nil, wdir, 128*pageSize)
+	testutil.Ok(t, err)
+
+	// Write to the initial segment then checkpoint.
+	for i := 0; i < seriesCount*10; i++ {
+		ref := i + 100
+		series := enc.Series([]tsdb.RefSeries{
+			tsdb.RefSeries{
+				Ref:    uint64(ref),
+				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
+			},
+		}, nil)
+		testutil.Ok(t, w.Log(series))
+
+		for j := 0; j < samplesCount*10; j++ {
+			inner := rand.Intn(ref + 1)
+			sample := enc.Samples([]tsdb.RefSample{
+				tsdb.RefSample{
+					Ref: uint64(inner),
+					T:   int64(i),
+					V:   float64(i),
+				},
+			}, nil)
+			testutil.Ok(t, w.Log(sample))
+		}
+	}
+	tsdb.Checkpoint(w, 30, 31, func(x uint64) bool { return true }, 0)
+	w.Truncate(32)
+
+	// Start read after checkpoint, no more data written.
+	_, _, err = w.Segments()
+	testutil.Ok(t, err)
+
+	wt := newWriteToMock()
+	st := timestamp.FromTime(time.Now())
+	watcher := NewWALWatcher(nil, "", wt, dir, st)
+	go watcher.Start()
+
+	expected := seriesCount * 10
+	retry(t, defaultRetryInterval, defaultRetries, func() bool {
+		return wt.checkNumLabels() >= expected
+	})
+	watcher.Stop()
+	testutil.Equals(t, expected, wt.checkNumLabels())
+}
+
+func Test_checkpoint_seriesReset(t *testing.T) {
+	pageSize := 32 * 1024
+	const seriesCount = 10
+	const samplesCount = 250
+
+	dir, err := ioutil.TempDir("", "seriesReset")
+	testutil.Ok(t, err)
+	defer os.RemoveAll(dir)
+
+	wdir := path.Join(dir, "wal")
+	err = os.Mkdir(wdir, 0777)
+	testutil.Ok(t, err)
+
+	enc := tsdb.RecordEncoder{}
+	w, err := wal.NewSize(nil, nil, wdir, pageSize)
+	testutil.Ok(t, err)
+
+	// Write to the initial segment, then checkpoint later.
+	for i := 0; i < seriesCount*10; i++ {
+		ref := i + 100
+		series := enc.Series([]tsdb.RefSeries{
+			tsdb.RefSeries{
+				Ref:    uint64(ref),
+				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
+			},
+		}, nil)
+		testutil.Ok(t, w.Log(series))
+
+		for j := 0; j < samplesCount*10; j++ {
+			inner := rand.Intn(ref + 1)
+			sample := enc.Samples([]tsdb.RefSample{
+				tsdb.RefSample{
+					Ref: uint64(inner),
+					T:   int64(i),
+					V:   float64(i),
+				},
+			}, nil)
+			testutil.Ok(t, w.Log(sample))
+		}
+	}
+
+	_, _, err = w.Segments()
+	testutil.Ok(t, err)
+
+	wt := newWriteToMock()
+	st := timestamp.FromTime(time.Now())
+	watcher := NewWALWatcher(nil, "", wt, dir, st)
+	go watcher.Start()
+
+	expected := seriesCount * 10
+	retry(t, defaultRetryInterval, defaultRetries, func() bool {
+		return wt.checkNumLabels() >= expected
+	})
+	watcher.Stop()
+	testutil.Equals(t, seriesCount*10, wt.checkNumLabels())
+
+	// If you modify the checkpoint or truncation segment numbers, rerun the
+	// test to see how many series records remain and update the Equals check
+	// above accordingly, or relax it to Assert(len(wt.seriesLabels) < seriesCount*10).
+	_, err = tsdb.Checkpoint(w, 50, 200, func(x uint64) bool { return true }, 0)
+	testutil.Ok(t, err)
+	w.Truncate(200)
+
+	cp, _, err := tsdb.LastCheckpoint(path.Join(dir, "wal"))
+	testutil.Ok(t, err)
+	err = watcher.readCheckpoint(cp)
+	testutil.Ok(t, err)
+}
+
+func Test_decodeRecord(t *testing.T) {
+	dir, err := ioutil.TempDir("", "decodeRecord")
+	testutil.Ok(t, err)
+	defer os.RemoveAll(dir)
+
+	wt := newWriteToMock()
+	watcher := NewWALWatcher(nil, "", wt, dir, 0)
+
+	// decode a series record
+	enc := tsdb.RecordEncoder{}
+	buf := enc.Series([]tsdb.RefSeries{tsdb.RefSeries{Ref: 1234, Labels: labels.Labels{}}}, nil)
+	testutil.Ok(t, watcher.decodeRecord(buf))
+
+	testutil.Equals(t, 1, wt.checkNumLabels())
+
+	// decode a samples record
+	buf = enc.Samples([]tsdb.RefSample{tsdb.RefSample{Ref: 100, T: 1, V: 1.0}, tsdb.RefSample{Ref: 100, T: 2, V: 2.0}}, nil)
+	testutil.Ok(t, watcher.decodeRecord(buf))
+
+	testutil.Equals(t, 2, wt.samplesAppended)
+}
+
+func Test_decodeRecord_afterStart(t *testing.T) {
+	dir, err := ioutil.TempDir("", "decodeRecord")
+	testutil.Ok(t, err)
+	defer os.RemoveAll(dir)
+
+	wt := newWriteToMock()
+	watcher := NewWALWatcher(nil, "", wt, dir, 1)
+
+	// decode a series record
+	enc := tsdb.RecordEncoder{}
+	buf := enc.Series([]tsdb.RefSeries{tsdb.RefSeries{Ref: 1234, Labels: labels.Labels{}}}, nil)
+	testutil.Ok(t, watcher.decodeRecord(buf))
+
+	testutil.Equals(t, 1, wt.checkNumLabels())
+
+	// decode a samples record
+	buf = enc.Samples([]tsdb.RefSample{tsdb.RefSample{Ref: 100, T: 1, V: 1.0}, tsdb.RefSample{Ref: 100, T: 2, V: 2.0}}, nil)
+	testutil.Ok(t, watcher.decodeRecord(buf))
+
+	testutil.Equals(t, 1, wt.samplesAppended)
+}
diff --git a/storage/remote/write.go b/storage/remote/write.go
index 93beca2f5b..f44662af1f 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -14,44 +14,53 @@
 package remote
 
 import (
-	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/storage"
 )
 
 // Appender implements scrape.Appendable.
 func (s *Storage) Appender() (storage.Appender, error) {
-	return s, nil
+	return &timestampTracker{
+		storage: s,
+	}, nil
+}
+
+type timestampTracker struct {
+	storage          *Storage
+	samples          int64
+	highestTimestamp int64
 }
 
 // Add implements storage.Appender.
-func (s *Storage) Add(l labels.Labels, t int64, v float64) (uint64, error) {
-	s.mtx.RLock()
-	defer s.mtx.RUnlock()
-	for _, q := range s.queues {
-		if err := q.Append(&model.Sample{
-			Metric:    labelsToMetric(l),
-			Timestamp: model.Time(t),
-			Value:     model.SampleValue(v),
-		}); err != nil {
-			panic(err) // QueueManager.Append() should always return nil as per doc string.
-		}
+func (t *timestampTracker) Add(_ labels.Labels, ts int64, v float64) (uint64, error) {
+	t.samples++
+	if ts > t.highestTimestamp {
+		t.highestTimestamp = ts
 	}
 	return 0, nil
 }
 
 // AddFast implements storage.Appender.
-func (s *Storage) AddFast(l labels.Labels, _ uint64, t int64, v float64) error {
-	_, err := s.Add(l, t, v)
+func (t *timestampTracker) AddFast(l labels.Labels, _ uint64, ts int64, v float64) error {
+	_, err := t.Add(l, ts, v)
 	return err
 }
 
 // Commit implements storage.Appender.
-func (*Storage) Commit() error {
+func (t *timestampTracker) Commit() error {
+	t.storage.samplesIn.incr(t.samples)
+	t.storage.samplesInMetric.Add(float64(t.samples))
+
+	t.storage.highestTimestampMtx.Lock()
+	defer t.storage.highestTimestampMtx.Unlock()
+	if t.highestTimestamp > t.storage.highestTimestamp {
+		t.storage.highestTimestamp = t.highestTimestamp
+		t.storage.highestTimestampMetric.Set(float64(t.highestTimestamp))
+	}
 	return nil
 }
 
 // Rollback implements storage.Appender.
-func (*Storage) Rollback() error {
+func (*timestampTracker) Rollback() error {
 	return nil
 }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 625f27137b..d9b7f92626 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -243,12 +243,12 @@ github.com/prometheus/procfs/xfs
 github.com/prometheus/procfs/internal/util
 # github.com/prometheus/tsdb v0.4.0
 github.com/prometheus/tsdb
+github.com/prometheus/tsdb/fileutil
+github.com/prometheus/tsdb/wal
 github.com/prometheus/tsdb/labels
 github.com/prometheus/tsdb/chunkenc
 github.com/prometheus/tsdb/chunks
-github.com/prometheus/tsdb/fileutil
 github.com/prometheus/tsdb/index
-github.com/prometheus/tsdb/wal
 # github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13
 github.com/samuel/go-zookeeper/zk
 # github.com/sasha-s/go-deadlock v0.0.0-20161201235124-341000892f3d
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index 0efdd32d34..05043138e6 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -33,6 +33,7 @@ import (
 	"github.com/go-kit/kit/log"
 	"github.com/gogo/protobuf/proto"
 	"github.com/golang/snappy"
+	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/promlog"
@@ -279,9 +280,14 @@ func TestEndpoints(t *testing.T) {
 			Format: &af,
 		}
 
-		remote := remote.NewStorage(promlog.New(&promlogConfig), func() (int64, error) {
+		dbDir, err := ioutil.TempDir("", "tsdb-api-ready")
+		testutil.Ok(t, err)
+		defer os.RemoveAll(dbDir)
+
+		remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
 			return 0, nil
-		}, 1*time.Second)
+		}, dbDir, 1*time.Second)
 
 		err = remote.ApplyConfig(&config.Config{
 			RemoteReadConfigs: []*config.RemoteReadConfig{
diff --git a/web/ui/assets_vfsdata.go b/web/ui/assets_vfsdata.go
index fd242177be..7e51f008fd 100644
--- a/web/ui/assets_vfsdata.go
+++ b/web/ui/assets_vfsdata.go
@@ -128,9 +128,9 @@ var Assets = func() http.FileSystem {
 		"/static/js/graph/index.js": &vfsgen۰CompressedFileInfo{
 			name:             "index.js",
 			modTime:          time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
-			uncompressedSize: 35311,
+			uncompressedSize: 35376,
 
-			compressedContent: []byte("\x1f\x8b\x08\x00..."), // generated, compressed asset data elided
\x2d\x12\xa9\x53\xa0\x6e\xad\xdc\x58\x60\xdb\x42\xa0\xac\x1d\xb8\xbb\xed\xe8\x75\xdc\xe9\xd4\xfb\x35\x04\xd8\x61\xc8\x3f\xd9\x1b\x42\xbb\x0f\x5a\x07\xd0\xa8\xb9\x0f\x86\x6d\xde\x3c\x64\xe0\x2d\x18\x3c\xa0\x7b\x7f\xf0\xf6\xc5\x6e\xc3\x0f\xee\x70\xec\xd0\xbd\x6f\x6e\x49\xd6\x2c\x97\xe2\xcd\x4b\x23\x73\xb7\xb4\xc8\xca\x5b\x35\xa2\x73\xf5\xb2\x5e\xd3\x5a\xe9\xb4\x76\x6b\xb1\xcd\x86\xae\x5d\x44\x71\x86\x34\xec\x06\x0c\x84\xd0\xff\x6f\xef\xff\x99\x2e\xd1\xd8\xe0\xc5\x95\x7a\x94\x58\xb5\x07\x30\xb6\x78\x18\x39\x69\xb9\xe8\x22\xc7\xd0\x73\x23\x78\xaa\xd3\x53\x6c\xa7\xb6\xba\x0b\xfe\x16\x4f\x48\x1e\x98\x54\x10\x60\xc3\x1d\xc9\xe1\xf9\x0c\x22\x14\xb9\x4e\xdd\xe0\x79\x7d\xe1\x2d\xa2\x05\xf2\x9b\x29\xa2\xa8\x57\x72\x51\x36\xd1\x3a\x9e\xba\xf5\xa1\x26\xd5\x92\xcf\x63\x77\x08\x8d\xf6\x35\xd8\x7d\xef\xf4\x59\xaf\x78\x3c\xc5\x15\x79\x7d\xfe\xee\xad\xc6\xf3\x02\xfe\xd8\xe0\x90\xfb\xd0\x0d\x92\x9b\xd1\x85\xc1\x67\xaa\xf8\xd7\xc8\x75\x65\x30\xf9\x5c\xd2\x22\x8e\x8e\x26\xec\x38\xea\xaa\xee\x21\x3a\x6b\x2b\x31\x55\xbc\xc6\x79\x79\xce\xdf\xab\xd3\xba\xb5\xe4\x14\xa6\x86\x7e\x93\x18\xe2\xc8\x2d\x54\xa7\x03\xbd\x7e\x89\x46\x9b\x88\xbf\x95\xfa\xdb\xc9\xdf\x42\x7f\x4b\xf2\xf1\xaf\x91\xa5\x8b\xa1\xaf\x2c\xff\x35\xb2\xd1\x28\xb0\xfa\xc8\x1f\x3d\x9a\xfd\x71\x1b\x19\x7b\x8a\x86\xf7\x91\xe7\xdd\x51\x0d\x76\x3b\xda\xfb\x51\x1f\x84\x59\x5a\xc2\xc9\x96\x23\xa5\x92\x58\xa8\xfa\x2a\x2f\xb1\xd0\xef\x8d\x50\x52\xfe\x1e\xbf\x97\x65\x76\x73\x3e\x18\xa0\x68\xff\x4d\x31\x8d\x7a\x28\xea\xeb\xbf\xf0\x8c\x6e\x69\x9e\xa3\x09\x51\xc0\x32\x29\x4e\x25\x7a\x8f\xdf\xa3\xc9\xca\x87\xdf\x4d\xd0\xf9\x9c\x18\x50\x29\x2e\x3a\x42\x36\x82\xb8\x65\x92\xf5\x10\x2f\xe1\x9e\x14\x12\x73\xb2\x40\x98\xa3\x19\xae\x38\x8a\xc1\x12\x48\x7c\xa7\x9d\x49\x89\x72\x1f\x1c\xda\x6d\x25\x4a\x70\xd1\xa2\xbe\x0b\xda\xe8\xa9\xa9\x70\x4e\x84\xbd\x6c\x75\xaa\x33\xb4\x24\x2f\xca\xbc\x64\xc9\x47\xf5\xd2\xf9\x36\xc0\x38\xf7\x0c\x26\xc9\x43\x0b\x2c\x18\xbd\x8b\x42\x15\xe5\x8c\x54\x1d\x7b\x45\x39\x2a\x4a\x81\xca\x29\x52\xf5\x21\x5a\xe0\x11\xfa\x98\x13\xcc\x89\xce\x6b\x80\x51\x5a\x32\x46\x52\x01\xd7\x7f\x09\xe7\xb4\x2c\x6c\x20\xa2\xa6\x86\xe2\xf3\x7b\xe7\x4b\xc4\x26\xf2\x8d\xd9\x70\x0a\xa7\x37\x05\xaf\x1f\x97\x8f\xec\x93\xe2\x62\x77\x5e\x2e\xb8\x96\x55\x30\x03\x43\x23\x4d\x1f\xb4\x1b\xdb\x70\xe4\xab\x2a\xee\x45\xe5\xd4\x4c\x7c\x73\x3e\xef\x54\x93\xf2\x35\x06\x2a\xc1\x75\xec\xe2\xd8\x2c\x60\xfb\xce\x29\x31\x4b\x0a\xbf\x97\x21\xfc\xf6\x82\xe6\x43\xfd\x37\xdc\x39\x0a\xae\x4e\xeb\x79\x48\x29\x4f\x80\xd4\xbf\x5a\x27\xf2\xdf\xdd\x50\x9d\x20\x5f\x1c\x5c\xfa\xd1\x40\xab\xa1\xb7\x36\x82\x64\x2a\x68\x17\x87\x97\x5d\x67\x95\xba\x48\x15\xb7\x09\xc9\xe5\x16\x4e\x73\x60\x02\x8f\xb1\x6a\xa1\xb6\xde\x40\x0e\x30\x7b\x1b\x21\x43\xdc\x13\x5c\x15\x6d\x0a\x33\xc6\x41\x01\xe2\x3c\x47\x0b\xca\xb9\x34\x55\xb8\x20\x15\x77\x59\x2b\x0a\x72\x6b\x2d\x6c\xad\x32\x95\x18\x94\xde\x26\xc3\x2a\x51\xe1\x2d\xfb\xd6\x33\x33\x42\x02\x1d\x85\xe5\xa4\xc8\x64\xe9\x7e\xbd\x36\xa9\x82\x20\xb3\xe7\x79\x5e\xde\x02\xf4\xa9\x54\x1a\x12\xbd\xaa\xa4\x85\x40\xb4\x50\xd1\xc2\xe9\x2a\xf1\x4f\x1a\x95\xc9\x6f\x23\x31\x24\x8e\x4f\x9e\x20\x55\x7c\x51\x95\xfc\x32\xb9\x43\x47\xb2\xdf\x46\xb7\xca\xf5\xe5\x4f\xa7\x1d\xb8\x52\xe9\x1e\x10\xcf\x34\xaf\x4a\xc8\xd4\xa3\x27\xaa\xbe\x5d\xad\x81\xf8\x72\x37\x44\xa2\x87\x74\x08\xe5\x7d\xb7\x19\xfe\x81\x90\x4d\xeb\x64\xdb\xba\x89\x75\x4e\x7d\xbc\xa3\xfd\xd7\xc8\x99\xb5\xf1\xc0\x04\x68\xe2\x53\xd0\xf8\xda\x42\x33\x0c\xf2\x10\x40\x46\x2b\x5c\xac\x90\x60\x38\x25\x5c\xaa\x29\x5c\x20\x72\x47\x55\xb6\x1a\x50\xe3\x49\x78\xc1\xdd\xb9\x66\xbd\xee\xdc\xed\xf8\x74\x4e\xf3\x8c\x91\x22\xee\xb6\x84\xd2\xb8\xb
a\xb5\xdb\x28\xf0\x02\xee\xdb\x07\x2f\xee\xeb\x17\xf7\x1f\xc7\x1d\xcf\x6c\x89\xd4\x8d\xfd\x63\x65\x92\x74\x9a\x37\xf7\x6b\xd5\xf5\x95\xfd\x66\x7d\x87\x7e\x23\xc7\xcf\xb6\x4a\xd0\x95\xf3\x53\x93\x22\xd3\x5e\xea\xb5\xee\x5b\x49\xf9\x17\x65\x71\x23\x65\x57\x94\xe8\xd3\xfb\x37\x3f\x23\x7b\xb5\xd7\xe4\xf8\xf1\x3c\x08\xbb\x1f\xdf\x7d\xfd\x8a\xbe\xf9\x4e\xf7\x70\x38\x37\xe9\xa6\x92\x16\x17\xba\x41\xb3\x6f\x3b\xb2\xc3\xdc\xae\x77\x3e\xe2\x0c\x62\x73\xf5\x0d\xcb\x5b\x2a\xe6\x88\x16\x37\x94\xd3\x49\x4e\x50\x24\xa5\x22\x52\x0a\x93\x23\x2c\x20\x44\x2e\x85\xa8\xd7\x25\x23\x19\xba\xeb\xcb\x49\x40\x93\x72\x59\x64\x18\x00\x90\x82\x2f\x19\xe1\x06\xbc\x98\x63\xa1\x38\x8f\x23\xcc\x08\xca\x28\xaf\x72\xbc\x22\x99\xea\x09\xa3\x29\xbd\x73\x70\x80\x0a\x41\xc6\x8c\x02\x57\x15\x04\xf3\x95\xd0\xb5\x8d\x20\xb6\xf0\xe5\xc0\x4d\x33\xa8\xe2\xee\x72\x3a\xf5\x73\x71\x20\xb5\xcc\xb1\xa3\x9a\x17\x48\xa1\x68\xb4\x2c\x20\x13\x11\xe8\x03\x5b\xab\xa1\x17\xee\xeb\x70\x43\xed\xd6\x47\x87\x4a\x9b\xe9\x19\x69\xf4\x62\x55\x8e\xae\xd0\xda\x81\x4b\x2d\xf2\xbe\xbc\x45\x29\x23\x70\xff\x62\x4e\xc0\xb6\x09\x85\xb8\x91\x8b\xce\xb7\x7e\xd4\xd5\x51\x85\x81\x0e\x71\x1b\x7a\xcc\x6f\xd7\x3f\x95\x82\x68\xe8\xce\x37\x3c\xc1\x06\xff\x86\xca\x48\x14\x77\x7b\xa0\x8e\x7b\x7a\xfb\x99\x89\xf9\x86\x36\x3f\xc9\xf7\xe0\x1c\xfb\xcb\x41\x0f\x3d\xb3\xed\xd4\xae\x8c\xb0\x61\xcb\x4d\xe1\xef\x75\xd4\x61\x84\x86\x28\xca\x69\x41\x8c\x5f\x19\x76\x7f\x55\x99\x63\xed\xcb\x91\xef\x30\xd3\xce\x64\xe3\xaf\xb1\xfc\xae\xe3\xa5\xa9\xac\x89\x97\xa2\x8c\x7a\x01\x51\x5f\xd1\x22\x83\x9b\x2c\x9c\x68\xce\xec\x70\xb4\xc0\x77\x83\x05\x2d\xf6\xd6\xdc\x61\x96\x4a\x57\x30\x67\x5a\x0c\x06\xe8\xa7\x39\x29\xcc\x65\x65\x69\x17\xaa\x4c\x3a\x99\x5d\x8b\x17\xf8\xce\xad\xc5\x1b\x64\x51\x38\xef\x92\xe5\x16\xd9\x3e\x5d\x32\xa6\xca\xdf\xf9\x90\x54\x52\x02\xbd\x82\xb5\x43\x94\xa5\x1f\xe5\x8a\x5c\xf7\x81\xda\x17\xc9\x0a\x1d\xd7\x3a\x78\xf2\x04\xf9\xaf\x1f\xb5\x39\xf8\xea\x28\x79\x0d\x5a\xbc\xb4\x76\x29\x95\x94\xd8\x1f\x87\xad\x35\xb7\xfb\x0b\x46\xc0\xcb\x89\x22\xdf\x02\xdf\x3d\x3d\x4c\x0e\xbe\x5d\x5f\x8d\x16\x86\x36\xc1\x4a\x0f\x33\x00\xef\xde\x14\x53\x5a\x50\xb1\x1a\xd5\x66\xa6\x1f\xbe\x78\xe0\x0c\xfd\x73\x26\xe1\x08\x70\xdc\x85\xf4\x6a\x2c\x1b\x09\xde\x36\xc7\x8b\x1d\x67\x76\xb1\xfb\x7c\xde\x7b\x79\x16\x00\xab\x31\x4c\x53\x3d\x32\xad\x7d\x32\xd1\xbe\xf3\x37\xaf\x9d\x4d\xf9\xdb\x37\xf5\xda\x92\x25\xac\x07\x1e\x1f\x24\x87\x4f\x63\x7b\xfd\x4f\x16\xf6\x25\xbc\xae\xdb\x94\x6c\xe9\x76\x2b\x84\x7b\xe3\x54\x93\xac\x74\xa7\x4d\x93\xa6\xde\x4d\xc0\xfc\x81\x13\x82\x2f\x4a\xcb\x0c\xdb\x54\xb6\x77\x53\x78\xb5\x05\xd6\x2f\x5a\x95\xaf\x05\xa6\xf4\x5e\xc9\x28\x29\x84\xd5\x94\x64\x6a\x22\xba\x05\x4d\xaf\x5f\xe9\x74\x2b\x70\x5d\x42\xe5\x5e\xf9\xf7\x77\x3f\x9c\xf7\x5a\xd6\x08\x40\x47\xaf\x11\xfe\x75\xe2\x90\x74\x3a\xe1\xa2\x1b\xc5\xbc\xbc\x21\xec\x25\x11\x98\xe6\xed\x63\x79\xed\x2a\xec\x36\x20\x85\x66\x70\xb3\x21\x56\x3a\xbf\x87\xee\x7a\x68\x15\xaa\x4d\x1d\x97\xd3\x39\xe2\x15\x2e\x8c\xa9\x28\x0b\xa3\xe3\x0e\xda\x77\x07\x38\x77\xe8\x29\x18\x70\xdd\x44\x94\x9f\xce\x5f\x28\xc7\x4e\xdc\x45\xfb\xa8\x73\x34\x90\x6d\x8f\x3b\x23\x0f\x2c\xbf\xc5\x22\x9d\x37\x01\xc3\x38\xae\xd4\xdb\x48\x25\x73\x18\x47\x13\x9c\x5e\xcf\x98\x34\x89\xfa\x7a\x77\xd8\x81\xdd\x0d\xa8\x0b\x28\x91\xdd\x48\xcb\xb5\xd9\x51\x5a\x16\x42\x07\x02\xa8\x2e\xf7\x91\x1e\x6d\xd2\xe6\x4f\x03\xc3\x4c\x39\xd5\x86\xc8\x77\x30\xae\xf4\x48\x54\x89\xed\xc2\x8b\x41\x82\x0a\x13\x06\x64\x31\xbd\x7a\x45\xda\x2b\xec\x7c\xa8\x21\x1a\x4d\x7b\x05\xbc\x11\x26\x6b\x56\xcb\xc4\xbf\x85\x77\xad\xf6\x88\x6a\x66\x0d\x92\x8d\x0c\xe1\xf5\x36\xa7\xb3\x79\x2e\x4d\x13\xc8\xbc\xd5\xd2\x
e5\x0f\x64\x8e\x6f\x68\xc9\x12\xad\xaa\x5f\x9b\x06\x31\xda\x89\xf5\x14\x5e\x43\xfd\x37\xec\x9c\xcf\x49\x7e\xa3\x8e\x11\x76\xe8\xf9\x1c\xac\x83\xdd\x18\x7e\x5d\xaf\xfe\xb9\xbf\xcd\xc1\xb1\xd5\x09\xce\xe9\x6f\xbf\x67\xcb\x19\xaa\xa9\xfa\x81\x5f\x8b\x26\xb0\x9b\x02\x1b\x38\xf0\x7b\x4d\xc4\x0d\x56\x81\x53\x37\x3b\x44\x1d\xb7\x84\x72\x6c\x09\xad\x68\xa7\x89\xdc\x5b\x6b\x2c\x74\xde\x19\x8e\x2a\xcc\x39\xdc\x46\x75\x69\x69\xa6\x25\xb3\xf6\xa0\xda\xf0\x80\xc3\xd4\xcb\x45\xc3\xf1\x0d\xd9\xd3\xbb\x22\x2f\x03\xcd\xf3\x7f\x7b\xfe\x33\x32\x87\xa4\x72\x17\x53\xb2\x8c\x30\x95\xbc\xa6\x6f\x7d\xa2\x88\x0a\xe5\xb6\xf5\xfa\x54\xc0\x6e\xa5\x25\x2a\x21\x2e\x39\x61\x72\x83\x25\xf7\x47\xea\x32\x13\xe0\xe3\xa7\x1b\xb4\x89\x6b\xb4\xbf\x31\xd8\x28\xb6\x27\xbc\x01\xe7\xeb\x56\x77\x44\xab\xd7\xf4\x7d\x09\x68\x82\x7b\x88\xa3\xa9\xd4\x88\x35\x4f\x68\xd3\x2f\x70\x8e\x27\x61\xbe\x22\x3f\x11\x8d\x77\x42\x64\x13\xe3\xec\xc4\x05\xb5\x40\x99\x5a\x54\x25\xde\x89\x0f\x54\xb4\x9c\xcb\xa8\xb3\x19\x4b\x9f\xd2\xca\x1f\x6e\x0e\x48\x7e\x28\xb3\x95\x21\xb5\x07\x2e\x4c\x00\x7a\x05\x17\xcf\x91\x98\x94\x99\xce\xfc\x04\xed\x82\x50\x3a\x7e\x4b\x45\x3a\x8f\x6b\xe7\xff\xfa\x0e\x2c\xe6\x04\x45\x37\x24\x15\x25\x8b\x86\x7b\xbe\x79\x68\x2f\x58\x6a\xa1\x96\x0b\x49\xf3\x58\x3e\x9c\x59\xd3\xbd\x76\x96\x44\x47\x82\x1d\x1f\x89\x0c\xa5\x65\x2e\xd7\xb0\x71\xe7\x59\xe7\xf8\x88\x1e\x17\x6a\xc2\x8f\x06\xf4\xf8\x68\x20\x32\xf9\xc3\x8e\xa3\xda\xad\x3e\x7f\x2b\x1d\xe0\xa3\x17\x03\xba\xa0\x42\xa9\x4d\x1f\xdd\x6e\xcd\xab\xe9\xdd\x9f\x69\x89\x29\x08\x2f\x6f\xc3\x64\x6a\x03\xd7\xa4\x8f\xa0\x97\xfe\xb2\x6b\x4f\xad\xda\x5c\xdb\xd6\xb3\x3d\xda\x44\x8b\xe3\xda\xf9\x9d\x02\xa9\x4f\xd9\x24\x2d\x74\x15\xed\xb9\xbe\x38\xbc\x74\xaf\x7c\x32\x29\xc2\xc0\x8d\xc4\x91\x9d\x48\x7d\x3c\xd1\x3a\x91\xff\x9f\x4e\xd8\xcd\xef\x9f\xb0\x9b\xfa\x84\xd9\x6b\x67\xe7\xe4\x4e\xe2\x1d\xd9\xc3\x0f\x8b\xde\x67\x85\xde\x67\x74\x84\x6e\xcc\xd9\x82\xc1\xed\x73\x98\x09\xc0\x41\xda\x1f\xdb\xca\x17\x9f\x2f\xf5\x94\xa2\xff\x29\xa7\xd9\x2f\x3f\x50\x53\x3d\x61\x83\xe3\x28\x74\x30\xff\x41\x5e\xf2\x30\xd9\x99\x95\xf4\xe9\x8f\x62\xa5\xf6\xde\x55\x95\xa0\x27\x7f\x26\xd6\x71\x6e\xbd\x23\xb0\xa9\x37\x77\x04\x55\x82\x8e\xbc\x51\x87\x7d\x76\xb7\x74\xaa\x1d\xa4\xc3\xd6\x95\xe8\x53\xc1\x97\x55\x55\x32\x41\x32\x7d\x7f\x10\x4e\xee\x1a\x40\xee\x7f\xb7\x9d\xd5\xfe\x39\x89\xb6\x04\x21\xf5\x9c\xf3\x81\x93\xdc\xeb\xfc\xb4\xbd\x78\x67\x9c\xdc\xfe\xce\xc7\x6b\xe5\x10\xc3\x13\x7e\xb5\xf2\xd3\xe5\xac\xec\x3a\xaf\x5e\x1d\x8f\xd1\x21\x79\xf6\xe7\xda\x95\x8c\x78\x85\x06\xaa\x3c\x11\xa5\xb7\x71\x8a\x7e\x89\x46\x61\x4e\x4a\x1f\xca\xe1\x1a\x28\x87\x75\x28\xff\xb1\x01\xca\xe1\x5f\xda\xa1\x1c\xfe\xa5\x0e\xe5\x64\x13\x94\x6f\xd7\x40\xf9\xb6\x0e\xe5\xe3\x26\x28\xcf\xd6\x40\x79\x56\x87\x72\xbe\x01\xca\x5f\xdb\x81\xfc\xb5\x0e\xe3\x6f\x1b\x60\x7c\xd7\x0e\xe3\xbb\x3a\x8c\x77\x1b\x60\xd4\xaf\xec\x6a\x18\xdf\xd4\x61\x5c\xaf\x87\x51\x83\xb0\x6a\xab\x17\xac\x51\x9b\x2a\x1e\x49\xa4\xfa\xeb\x78\xaf\xdf\x64\xbe\x55\x3b\x62\x1a\xce\x1a\xee\xeb\x37\xd9\xef\xb7\x4d\x70\xd6\xf1\x5f\xbf\xc9\x80\x78\x23\x9c\x35\x1c\xd8\x6f\xb2\xe0\x74\x23\x9c\x35\x3c\xd8\x6f\x32\x61\xb5\x09\xce\x5f\xdd\xf2\x56\x03\xd4\x60\xc4\x62\x13\x9c\x35\x9c\xd8\x6f\xb0\xe2\xff\xfe\x5f\xeb\xc0\x1c\x92\xfe\x1a\x5e\xec\x37\x98\x71\xb1\x1e\x97\x36\x1e\xdb\x92\x85\xc1\xb3\x63\x82\xcb\xf6\xca\x9a\xd9\x14\x7e\xf2\xee\xf9\xcf\x57\x67\x27\xa7\x6f\x4e\xce\xae\xde\x7f\x7a\xa7\xbf\x9c\xe4\xae\x76\x10\xce\x31\xdc\x56\x8e\xd6\xe7\xff\x7a\x45\x44\x3a\xf7\xd2\x7f\x85\xa6\xdd\x3e\x24\xf7\x52\x3e\xaf\xc9\x52\x98\x53\x34\x5a\xcc\x50\x59\xe4\x2b\x34\xa5\x8c\x0b\xdb\xb6\x86\xce\x3e\x8a\x92\xc8\x46\
x13\x86\x80\x8f\x6b\x95\x1b\x3b\x39\x73\x31\x48\x0f\x22\x0c\x63\xd1\xb0\x78\x95\xd3\x94\xc4\x07\xbd\x3a\xb0\x5a\x0c\x90\xaa\x0e\xc9\x30\xf4\x37\xa5\x3e\xe2\x19\xd9\x7b\x3a\xd8\x53\x19\x7b\x5c\x46\x1d\x7d\x1d\xc5\xe5\xd5\x19\xa2\x0b\x70\x0e\xa9\x8d\xb5\x79\xaa\xe7\xec\x19\xaa\x74\x83\x70\xbc\x1a\xb7\x26\x30\xec\xe8\xcc\x84\x1d\x75\xc0\x2a\xc1\xd4\x56\xd1\x8f\xb8\x71\xb1\xbc\xa5\x46\x2d\x21\x51\xfb\xc2\x0f\xc8\xba\xc4\x1e\xe0\x74\x05\xdc\x3e\x9d\xbe\x75\x47\xee\x7e\xad\x56\x53\x3e\xa8\xa0\x4e\x10\xef\x5d\x6c\x67\xf0\xd6\x1c\x43\x40\x57\x38\xcb\x94\x57\x09\xd9\xaf\x58\x3d\x8e\xa3\x3f\xe1\x2c\xbb\xd2\xc9\xf1\x75\x86\xc9\xa0\xb6\xfa\x08\x81\x2c\xea\xa1\x2f\xf7\xdd\xa6\x99\x51\x1b\xbe\x19\x50\x93\x04\x72\x70\x3a\x1a\x54\xce\x85\x7c\x93\x70\x82\x99\xfa\x08\x4c\x14\xd5\x44\xd4\xc4\x44\x69\xe2\xc1\xdc\x7e\x34\xd7\x55\xda\xe1\x24\x7c\x39\x51\x26\x64\x7c\xd8\x05\x36\x14\x71\xe7\x49\xc7\x5e\xbf\x72\x30\x5e\x93\xbc\xb2\x5e\xc2\xfa\x60\xfe\x5e\xab\x16\xfb\x91\x1d\x75\x18\x6a\xc0\xae\x09\x8f\x3d\x4c\xb7\x52\xcb\x50\xd9\xa7\x96\xf9\x3c\x51\xc8\x37\x4d\x5c\x95\xc7\x03\x48\xf6\xd8\x7e\x1a\xc8\xfb\xec\x87\xf6\xff\xeb\x0f\x27\x29\xab\x5b\xce\xac\xf2\x97\x7c\x3a\x7d\xeb\xa6\xb6\xeb\xbd\x56\xd6\x67\x6d\xee\xbb\x7b\x90\xa3\x6f\x2f\xc8\xd3\xa6\x84\x4f\xb1\xa0\x09\xa7\x00\xa6\x52\xb3\xd3\xd5\xde\xc6\x66\xc0\xaf\x09\x12\xb1\xbe\x48\x97\xd0\x58\x92\x6b\x30\x40\xef\x3f\x9c\x9f\x0c\x6b\x69\x7d\x26\x04\x5d\x93\x4a\x40\xf2\xa3\x55\x91\xaa\x80\x81\xc1\x52\xd0\x5c\x6a\x4d\xf3\x37\x2d\x8b\x9b\x64\x56\x0e\x01\xee\x5b\x5a\x5c\xbf\x2a\xd9\x89\x0d\xbc\xdb\x30\x15\x96\x2c\xed\xc2\x0b\xb3\xaa\x16\x1b\x08\xc2\xac\x53\x21\x88\x3b\x9b\x29\x39\x83\x1c\x37\x7e\xac\x5e\x4d\x03\x28\x3a\xb8\x6c\x3f\x26\x60\xe6\x0f\xf3\xaa\x07\xe2\xc3\xe4\x33\x49\x85\x49\x7b\xe6\x33\xee\x8c\x14\x84\x61\xa1\x78\x57\x55\x0b\x94\x8f\xc1\x3f\x50\xf1\x8f\x55\x40\x56\xec\xc1\x36\x31\xd9\xea\x4b\x44\x2a\x14\xf6\x89\xfe\x4e\x85\x56\xaf\xc0\x23\x67\x02\x0b\x12\x7f\xb9\xef\xa1\x28\xea\x21\x15\xde\xf3\xbd\xdc\xe2\x79\xa4\xdd\x2a\x30\x1e\x77\xfa\xf3\xa4\xd8\x0f\x28\xdd\x98\x9a\xd6\xe9\xd2\x59\xc4\x1c\x80\x2e\xfa\xa2\x87\x38\x03\xf7\x36\xd4\x6b\xb9\xb6\xd1\x4a\xf5\x1d\x16\x88\x7a\x93\xba\xca\xfc\x7b\xa0\xdf\x2c\x34\x5f\x99\x58\x5e\x04\x87\x30\xc9\xc2\x26\xea\x0c\x0f\x86\xf5\xa6\xb8\xc1\x39\xcd\x5a\xf4\x91\xca\xf6\xe5\xeb\x33\xd5\x4c\x5a\x1b\x7a\xda\x5f\xb1\x72\xf1\x41\x75\xa0\x01\x34\xbb\xeb\xa1\x83\x1d\x29\x93\xb8\xde\xd5\x61\x23\x1a\xa3\xc1\x7f\xce\x7e\xcd\xf6\x7f\x4d\x92\xfd\x71\xb2\xff\x78\xf0\x30\x62\xb5\x8c\xd0\xa7\x17\x70\xe7\xf9\xb2\xca\xcd\xe9\xbc\x1e\xa6\x57\xde\x98\x7b\xf7\xae\xb6\x04\x3d\x78\x70\x89\x20\x5c\xf8\xf0\x46\xed\x77\x7f\xb6\x0e\x72\xd3\x7c\xac\x61\x8f\x9e\x62\xd9\x37\x4e\xe7\xc8\x05\xd7\xab\xe0\x8c\x89\xc6\x96\xab\xb6\xd6\x56\xf0\xe5\xbe\x0f\x53\xa9\x7f\x01\x5e\x90\xcd\x0f\xa0\xa9\x8f\xfb\xc5\x5e\x97\x66\x91\x2d\x96\x8b\x09\x61\x1f\xa6\xaa\xd3\x57\x25\x93\x50\x8c\xc0\xfa\xe8\xec\x3c\x0d\xee\x85\x8a\x55\xe5\x3f\x51\x31\x8f\x1b\x48\x6a\x62\xdb\x6b\x64\x9a\x02\x9b\xf0\xd9\x4e\x89\x6d\x83\x70\xb6\xee\xfa\x7e\xcc\xc2\xd9\x02\xaa\x59\x18\x2e\x24\x3b\xd1\xc4\x1a\x3d\x0d\x92\x68\x5a\x58\x36\x6c\x24\x8b\x73\x36\xa8\x27\xdd\x1f\xa6\x1f\x0a\xbd\x2e\x37\xf1\xb3\xf3\xac\x80\x3c\x4f\xd3\xe5\x62\x99\x63\x01\x77\xc7\x76\x50\x26\x6b\x38\x16\xed\xeb\x6b\xf2\x0d\xb0\x36\x14\xcf\x7d\xf4\xb1\x9e\x3a\xcd\xab\xfd\x60\x51\x5b\x3f\xf8\xed\x6a\x38\xc8\xb9\x87\x42\xe6\x6e\x44\x0d\xf9\x93\xe8\x5a\xbf\xc7\x0b\xf2\xbc\xc8\xcc\xd5\x0f\xa1\x66\x54\x59\xae\xe3\x8e\xb7\x98\xbb\xea\xf6\x3b\xb7\x7e\x5b\x48\x7f\x5d\xab\x6c\x80\x66\x24\x2d\x33\xf2\xe9\xf4\xcd\x8b\x72\x51\x95\x05\x29
\x0c\x2d\x03\x00\x87\x97\x2e\x91\xe6\xaf\xfb\x90\x49\x13\x45\x5d\x93\x19\x5b\x4a\x92\x8f\xc2\x18\x45\x02\x4f\xbc\x1b\x36\x61\x97\x36\xb9\x82\x57\xac\x12\xe3\x0b\x3c\x41\x94\x43\x08\xdf\x8c\x30\xed\x96\xf6\x2d\xd5\x0b\xd7\xcd\xa5\x1d\xea\x8f\x26\x15\xdf\x7d\xcb\xf4\x37\x33\xe7\x6d\x9b\xf4\xba\x1e\xf3\xa7\xda\x33\xda\x74\x2f\xd1\x4c\x5a\x29\x54\xb3\x29\x6c\x9e\x1f\xd8\x5f\x8b\xa9\xd5\xb0\x5e\x6a\x56\x97\xe5\xb2\xca\x60\xd8\xae\x81\x69\xa0\x7c\x43\x93\x4f\xb1\xa5\x7a\x4c\xae\xc9\x8a\x07\x3d\x75\x9b\x4c\x7a\xed\x3e\xbc\xe9\x41\xba\xd0\x28\xec\xa3\x6b\xb2\xba\x34\x76\xab\x86\x72\x21\xcb\x1a\xf1\xef\x5e\x6b\x6b\xd3\x9f\xcf\x09\x27\x48\xdc\x96\x3a\x85\x00\x47\x31\xe5\x2f\x49\xc5\x48\x8a\x05\x51\xfb\x20\x69\x7e\xe3\x22\x43\x8c\x64\x94\x91\x54\x9c\x97\xef\xe8\x4c\x52\x2e\xfb\x74\xfa\xb6\x2b\xa1\x60\x46\x10\xce\x32\x92\x69\x4f\x47\xc9\xe0\xf3\x7a\xb7\x98\x65\x70\x01\x1c\x0b\x3a\xa1\x39\x15\x2b\xb9\x65\x28\x73\x93\x7e\x5c\x39\xbf\x93\x3d\x9b\x0c\xb5\xad\xeb\x0d\x1b\xd5\x39\xe6\xf3\x0d\x0b\xa8\xfb\xd2\x82\xd1\xb1\x4a\xe8\xb2\x57\x0c\xcf\x74\x26\x8f\x16\x31\x6c\xeb\x45\x1d\xee\xb2\x95\x95\x2c\xcf\x87\x51\x03\xaa\x55\x7f\x7c\xd8\x55\xb2\x95\xb1\xb2\x82\x73\x7e\x09\x07\xfd\x09\x7c\x61\x29\x44\x0d\xc5\xa4\xe1\xd1\xf3\x50\x76\xc6\x20\x93\x52\x76\xbf\xe7\xc8\xd4\x3a\x11\xde\xb6\xe8\x8f\x0d\xb3\x65\x4f\xf4\x47\x46\xdb\x2e\x01\x75\xa7\x48\xb0\xc0\x96\xa1\xd4\x39\xf5\x6c\xc5\xae\x45\xfa\x65\x1d\x5f\xaa\xca\x5d\x04\x6a\xb3\x48\x95\x35\x69\x42\xc1\xd7\x59\xed\xc0\x20\x1d\x47\xfb\x0e\xac\x46\xe4\x96\x14\x23\xb5\x1d\x97\xef\x78\x53\x89\x83\x74\x12\x0b\x34\x57\x9b\x1a\x33\x0a\x2e\x6b\x3c\xb5\x6e\x39\x3f\xfb\x85\x76\xcc\xd9\x2c\xda\x2d\x69\xc0\x35\x6b\x6d\xf5\xc4\x11\x80\xd0\x07\xe8\x7d\xe7\x96\xdb\x33\x09\x8d\x1b\xb9\x74\x87\xad\xdb\x3f\x93\xee\x5b\x7d\xe6\xf3\x24\x57\x17\x45\x32\x7a\x93\xd4\x20\x8f\xbc\xca\xfa\x9b\x62\x8f\x63\xd3\xaa\xeb\xae\xba\x74\xa8\xa9\x6b\x5e\x26\xe5\x74\x1a\x77\xc0\x57\xd6\xf1\xd7\xc7\x75\x89\xc5\xbd\x33\x78\xa9\xc2\x55\x64\xf8\x87\xc2\x75\x36\xf2\x97\x41\xd7\x4b\x61\x3a\xe9\xb5\x25\x57\x87\x2b\x9f\xa9\x92\x26\x15\x74\xd2\x69\xf9\xf2\x57\x27\xc8\xe5\x18\x60\xb8\x0e\x8f\xc0\x83\xbe\xb6\x07\x80\xbf\x1d\xfa\x74\xda\x02\xde\xe7\x6f\x98\xdb\x20\x87\xca\xe6\x79\x35\x62\xa0\x7c\x27\x61\xc6\x5d\x3b\x9d\xfc\x8c\x2e\x2a\xf3\x9d\x86\x70\xff\xef\x25\xf6\x07\x99\xfe\x30\x8d\x75\x4a\x73\xb9\xeb\xef\x1f\xba\x68\xe6\x10\x4a\x5d\x7f\x06\x73\x96\x13\xa1\xaf\xd2\xfe\x3d\x14\x8e\x07\xf8\x9f\xfd\x01\xcc\x31\x97\x35\x35\x38\x63\x4f\x05\xc0\xb7\x61\x1f\x82\x70\xd8\xb7\x40\xd2\x7b\x98\xad\x7d\x98\x4c\x56\x76\xd8\x2d\x2d\xc0\x19\xa8\xaa\x8f\xea\xb3\xf6\xa2\x5c\xc2\x0a\xd1\xd2\xcc\x4f\xfd\xd1\x4a\xc8\x36\xb4\x01\x6b\x0f\x72\x1f\x7d\x7b\xd0\xf3\xba\x32\xb2\xd9\xfa\x4d\x22\x4b\xf9\xc6\x37\x89\x9a\x5d\x99\xc9\xd9\xf0\x79\x80\x56\x14\x47\x0f\x55\x0d\xca\x07\x69\x3f\x67\xa0\x60\x9f\x91\xe0\xab\x03\x0f\xfb\x30\x81\x1f\x97\x0b\xa2\x66\xa4\xde\x93\x32\x2b\xa0\x1a\x99\x7f\x1d\x2a\xda\x59\xbd\x46\xcb\x06\x11\x74\xed\xca\xcc\x7d\x90\xae\xa9\x8a\x6a\x4a\xda\xd5\xa4\xbc\x6f\x01\x8c\x36\x70\x44\xeb\x0a\xd4\x53\x17\x7d\x46\x75\x02\x4e\xa7\xff\x0c\x0a\x7a\xe4\xf9\xbd\xd4\xd1\xa3\x6f\xa5\x8c\x47\xb9\x1a\x75\x02\x60\x7f\x90\x40\x60\x49\x7a\x14\x5a\x37\x70\x8f\x60\x0b\x53\xe6\xb2\xc4\x35\x3c\xb4\x75\x5b\x2a\x58\x0c\xbc\xb3\x32\xb5\x08\xa8\x1c\x97\x36\x5b\x7f\xb8\x32\x6d\xaa\x97\xd8\x0f\x83\x58\xa4\x5a\x17\x2a\xd8\xc6\x58\xbb\xd8\xbf\xfd\xae\x3c\xd7\xd6\x24\x26\x85\xa0\x62\xf5\x4e\x7d\xd8\x02\x20\x44\x4f\xa2\x21\x8a\x9e\xe0\x45\x35\x32\xf9\xf3\x8f\xa0\x24\x17\xb6\xe0\x18\x0a\x66\xb6\xa0\x13\x75\x86\xa8\xf3\xe4\x1f\xcb\x52\x8c\xf4\x97\x6
d\xa3\x4e\x24\x8b\xfe\xf4\xcd\x5f\x6d\xc9\x40\x95\xdc\x3d\x7b\x35\xea\xd8\xcf\xf6\x6a\x8b\x4b\x9f\x8c\x6b\xf4\xdc\xe6\xfe\xe2\xc9\xd1\x71\xd4\xf9\x75\x70\x29\xf7\xf8\xee\xe3\x10\xbc\x66\xb0\xd9\x61\x5c\x70\xbb\xbf\xf3\x77\x06\xb4\xa0\x42\x9b\x23\x2a\x43\xdc\x19\x11\xcb\x4a\x47\x7b\xa7\x38\x9d\x13\xfd\x51\x13\xb7\x6b\x0f\x32\xc9\xb5\x7e\xf4\x88\x0b\x2c\x68\x3a\xf8\xcc\x95\xb1\xaa\x7e\xaf\x04\x59\x54\x39\x16\x26\x78\x77\x82\xd9\xf7\x37\x63\x69\xc7\xfe\xf0\xe9\xcd\xdb\x97\x57\x3f\x9e\x9c\x9e\xbd\xf9\xf0\x5e\x5f\xff\x6b\x64\x8d\x93\x33\x2e\xf1\xdc\xf3\x78\xe1\x5c\x43\xd4\x31\x85\x66\xc2\xdf\x2d\xb9\x90\x98\x9b\x0d\x88\x6c\x39\xf2\xed\xad\xd6\x4d\x63\x98\x60\xa0\x75\xbf\x54\xb3\xac\xfc\xd8\x41\x86\xf4\x11\x73\xcb\xf1\x4e\xec\x5d\x63\x07\x1f\x83\x22\x7a\x8d\x3f\xdd\xe4\x3c\x8e\x65\x85\xee\x68\xef\xff\x04\x00\x00\xff\xff\xc2\x6c\xa4\x86\xef\x89\x00\x00"),
+			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x7d\xeb\x7a\xdb\x38\xb2\xe0\x7f\x3f\x05\xc2\xce\x17\x51\xb1\x44\xd9\xe9\xe9\xde\x69\xc9\x72\x6f\x2e\xce\xc4\xe7\xc4\x49\xc6\x76\xfa\x72\x1c\x1f\x7f\x10\x09\x49\x8c\x29\x92\x03\x40\xb6\xd4\x89\x1e\x6b\x5f\x60\x9f\x6c\x3f\x14\x2e\x04\x48\xe8\xe2\xf4\xcc\xf9\x76\xf7\xe4\x87\x1c\xe1\x52\x28\x54\x15\x0a\x85\x42\xa1\x74\x87\x29\xfa\x40\x8b\x19\xe1\x53\x32\x67\x68\x68\x7f\xf9\xfa\x15\x7d\x59\x0d\xf6\x44\x93\x09\xc5\xe5\xf4\x92\xcc\xca\x0c\x73\x32\xd8\x83\xb2\x8b\x93\x97\xef\xdf\xbd\x42\x43\x74\x78\x70\x70\x30\xd8\xdb\xeb\x3d\x7d\xba\x87\x9e\xa2\xbf\x89\xa6\x7b\x4f\x7b\x7b\x15\xa4\x08\xca\xd0\x10\x8d\xe7\x79\xcc\xd3\x22\x0f\x49\x46\x66\x24\xe7\x1d\x54\x94\xe2\x3b\xeb\xa0\x29\xce\x93\x8c\xbc\x9c\xe2\x7c\x42\xf4\xb7\x73\x32\x2b\xee\x48\x1b\x7d\xd9\x43\x88\x4f\x53\x16\x91\x0c\x0d\x91\xea\x3b\xd0\x85\x80\xdb\x9b\xcb\xb3\xb7\x68\x88\xf2\x79\x96\x99\x0a\x05\x1b\x0d\xf5\x28\xa6\xc6\x1e\x0c\x0d\x9d\xb1\x6b\x6d\x24\x0a\x36\xea\x12\x1d\xe4\xa0\x18\x8a\x1e\x6d\xd1\x75\x65\xfa\xd3\x34\xbe\x65\x53\x7c\xaf\xe7\xee\xa0\x96\x60\x8e\xd1\x10\x5d\x5d\x0f\xf6\x74\x51\x9a\xa7\x3c\xc5\x59\xfa\x07\x09\xdb\x83\xbd\xd5\x60\xaf\x41\xc0\x88\xa7\x33\xf2\x1a\xc7\xbc\xa0\x62\x52\x02\x8d\x60\x19\xf4\xd1\x8f\x07\xe8\xa9\xfc\x78\xf6\x17\xf4\x14\x7d\xff\xe3\x0f\x1d\x51\x75\xdf\xac\xfa\x1f\x50\x91\xd4\x2a\xa0\x70\x5a\x15\xc2\xf7\x19\x7c\x87\xff\xb2\xa0\x8f\x0e\xfd\x18\x31\x4e\xca\x5f\x70\x36\x27\x02\xa1\x2b\xd1\xf8\x90\x05\x1d\x14\x1c\x1e\xc8\x3f\x33\xf1\xf9\x03\x7c\x1e\xca\x3f\xdf\x1f\xc8\x6f\x53\xf1\xf9\x0c\x3e\x7f\x84\xcf\x43\xf9\xe5\x30\x81\x8a\x24\x80\xa1\x0f\xef\xe1\x1b\x7c\xfe\x05\x3e\xff\x0a\x9f\x87\x4b\x28\x5f\x06\x7b\xd7\x3e\xb4\xf2\xf9\x0c\xfe\x23\xb0\x3a\xf0\x35\x28\x69\xc1\x0b\xbe\x2c\x89\x45\xf6\x26\x93\x85\x94\x33\x92\x8d\xd1\x10\x58\x24\xb8\x27\xbe\x46\x69\xe2\x2c\x94\xfa\xa0\xfb\xfb\xc0\xd5\x5e\x0f\x5d\x10\x8e\x12\x32\xc6\xf3\x8c\x6b\x19\x8c\x34\x10\xfd\x1d\x80\x29\xb0\x83\x7a\x25\x15\x22\x79\x93\xe6\xe5\x9c\xeb\x56\xbe\xaa\xaf\x5f\x81\xa2\xa2\x7b\x3a\x46\xa1\xd3\x8e\xe3\x11\x1a\x0e\x87\x68\x9e\x27\x64\x9c\xe6\x24\xd1\x02\xdc\x6c\x85\x0e\x41\x84\x15\xf2\xaf\x28\xbe\x97\x0b\x1f\xc5\x45\xce\x69\x91\x31\x84\xf3\x04\xbe\xe0\x34\x27\x14\x8d\x69\x31\x43\x6f\x60\x1d\x8c\x30\x65\x88\x2b\x05\x11\xed\x29\xe2\x55\x2b\x50\x0e\xd9\x2a\x31\x9f\x7e\xa0\x64\x9c\x2e\x5a\x7d\xf4\xe1\xf9\xe5\x9b\x9b\x0f\xe7\x27\xaf\x4f\x7f\xeb\xc8\xea\xd1\x3c\xcd\x92\x5f\x08\x65\x69\x91\xb7\xfa\xe8\xc5\xc7\xd3\xb7\xaf\x6e\x7e\x39\x39\xbf\x38\x7d\xff\x4e\x2f\xae\xcf\x7f\x9f\x13\xba\x8c\xc8\x82\x93\x3c\x09\x8d\xfe\xb0\x67\xd3\x36\x74\xb4\x75\xc3\xe3\xf0\x6c\xce\x38\x8e\xa7\x24\xa2\x24\x4f\x08\x0d\x1d\xad\x66\x74\x51\xbb\xea\x4e\xb2\x08\x97\xa5\x18\xc7\x85\xd6\xd6\x0c\xfe\x1b\xe1\x88\x92\x31\xa1\x24\x8f\x09\x43\xbc\x40\x38\xcb\x10\x9f\x12\x94\xe6\x9c\x50\xc2\x78\x9a\x4f\xb4\xc6\x62\x28\xcd\xa1\xae\x22\xaa\xa4\x23\xce\x13\x09\x6e\x94\xe6\x09\x22\x77\x24\xe7\x4a\xbd\x50\x90\x17\xa3\x81\x7f\xa5\x02\x1d\xaa\x45\x81\x64\xd1\x38\xcd\x93\x30\xf8\x0e\x6a\x6f\xee\x65\x75\x80\xf6\xb5\x40\x55\x53\xf9\x87\xa0\xda\xeb\x82\xce\xd0\xd0\x81\xa5\x20\xc8\xfa\x9b\x71\x41\x67\x81\x99\xdd\xf3\x39\x2f\xba\x94\x30\xb1\x38\x04\xde\x9c\x2c\x38\xc2\x94\x60\x54\xe4\x48\x4a\x5e\x41\xd1\xac\x98\x33\x12\x67\x69\x7c\xab\x50\x95\x3d\x2e\xc9\x82\x43\x5b\x47\xed\x6b\xe1\x03\xe9\x18\x8f\x19\xe1\xa0\xd1\x23\xf9\xff\x37\x24\x9d\x4c\x39\xea\x8a\x92\x38\x4b\x49\xae\x4a\x06\xd0\xe7\xb1\xe8\x1f\xc5\x8c\x85\xad\x29\x14\xb7\x3a\xa8\x85\xe7\xbc\x68\xd5\x4b\x49\x16\xb1\x98\x16\x59\xa6\x00\xee\xab\xb1\xb4\x8a\x36\xfc\x5d\x94
\xd4\x4f\x0f\xae\xb0\xbf\xca\xf1\x8c\x0c\x45\xbb\xeb\xc0\x92\x8b\x45\x49\xa3\x5b\xb2\x2c\x29\x61\x2c\xac\xa6\xa7\x67\x17\x17\x39\xe3\x88\x08\x11\x10\xab\xea\x7b\x89\xbf\x58\x9a\x24\xba\x9f\xa6\xf1\x14\x0d\x87\xaa\xfa\xc9\x13\xf4\x88\x44\x6c\x9a\x8e\xf9\xbf\x93\xa5\x06\x50\x67\x5a\xc4\xe6\xa3\x59\xca\xc3\xf6\x40\x55\x93\xa8\xa4\x20\x28\xaf\xa4\x7a\xd1\x35\x2b\x45\x29\xd8\x90\xa2\x22\x0f\x5b\xb7\x64\x39\x2f\x25\xb7\x5a\x1d\x47\xc3\xd5\x18\xa5\x36\x31\xb4\x92\x54\x6a\xbb\x64\x8a\x80\xc3\x9e\xb9\x7a\x81\x78\x01\xc0\xfe\x2a\x57\x92\xbd\xe3\x5a\xed\x40\xa7\x9d\xda\xda\xae\xa2\x80\xe4\x0b\xcc\x43\x32\xc5\x52\x80\x36\x6f\xc4\x12\xbf\x25\xc9\x0b\x9e\xaf\x83\xa1\x9b\xdc\x8c\x78\xde\xec\xb8\xc3\xc8\xaa\xa5\x3d\x6a\x9a\x33\x42\xf9\x19\xe1\x34\x8d\xd7\x41\x60\x24\x23\xb1\x02\x21\xdb\xdf\xcc\xa0\x83\x0d\x88\x92\x31\x25\x6c\x7a\x2a\x84\xe3\x0e\x67\xbb\xc0\x52\x5d\xae\x03\x8b\x92\x42\x02\x8b\x8c\x5c\x82\x5a\xf7\xad\x77\xd5\x20\xa8\xe9\x4a\xd1\x01\xad\xe9\x22\x95\x8c\x51\x5b\xf6\x70\x1c\x8f\x98\xbf\x17\xbe\x12\xb6\x4e\x97\x17\x93\x49\x46\x86\x2d\x8e\x47\x2d\x7b\xba\xa2\x63\x44\xfe\xd1\xd8\xb2\xda\xe2\x23\x0c\xd8\xb4\xb8\xaf\xb7\x2e\x72\x59\x9e\x47\x23\x68\x1a\x58\x52\x4d\x6c\xfd\xc2\x31\x9d\x80\x7e\x79\x1c\x92\x48\x7e\x51\xab\xc4\xb3\xf5\xc9\xfa\xa8\xc4\x94\xe4\x3c\x6c\x47\x69\x9e\x90\x45\x68\xb7\xb7\x65\x56\x57\x88\x25\xfd\x38\x0c\xbe\x13\x2a\x57\x41\xc0\x9c\xd3\x30\xc0\x34\xc5\x5d\xbd\x6d\x06\xed\x76\x34\xc5\xec\x65\x86\x19\x0b\x03\x4a\xb2\x02\x27\x41\xbb\xb6\xd6\xe5\x0a\x87\xcd\xcd\x5e\xcc\x2b\xa3\x8d\xcf\x09\x9f\xd3\x1c\x09\x7b\x93\xa1\x71\x11\xcf\x19\x1a\xe1\xf8\x56\x6c\x3a\xa0\xc7\xd2\x9c\x71\x82\x13\x54\x8c\x91\x84\x25\xf6\x9e\xc8\x27\xa0\xd1\x08\x58\x73\x4b\x96\x49\x71\x9f\x0b\x4b\x8a\x02\x6c\x2f\x25\xab\x05\x0c\x63\x3a\x24\x81\xe2\x3b\x9c\x85\xee\xb7\xb6\x6a\x23\xa1\xae\xd1\x55\xab\x76\xa5\x1e\x28\x2d\xd6\xe8\x61\x59\x17\xb4\xa3\x69\x9a\x28\xaa\x43\x97\x7b\x4c\x73\xb1\xb5\xfa\x3b\xa9\xda\x66\x37\x68\xfc\x5c\xee\x4a\xeb\x45\x5c\x28\xb2\xfa\xc2\xd0\x0b\xd1\x40\x70\xba\x58\xad\x97\xcf\x17\x29\x5b\xdb\x7a\x79\x83\x17\x29\xb3\x9a\x67\x64\x42\xf2\x64\x0d\x3a\xb2\xd2\xd6\x51\x65\x9a\xe7\x64\x1d\xad\x54\xad\xbd\x51\xdd\xe1\xec\x82\x63\xbe\x66\x71\x42\xfd\x0d\x13\x0d\xec\xd5\x4c\xf2\xe4\x15\xe6\xc4\xdf\xc7\xd2\x83\x24\x4f\x9a\xfa\x57\x75\x16\x47\x1c\x22\x0e\x2c\x65\x1a\xdf\x12\x1a\x4a\x61\xca\x8a\x18\x67\xa4\x8f\x5a\x24\x6f\x49\x9b\x4f\x58\x1c\x98\xf7\x51\xeb\xf7\xdf\x7f\xff\xbd\x7b\x76\xd6\x7d\xf5\x0a\xbd\x79\xd3\x9f\xcd\x54\x3d\x2f\x8a\x6c\x84\xe9\x87\x0c\xc7\x60\x44\xf5\x51\x6b\x54\x70\x5e\xe8\x7a\x96\x26\xe4\xc5\xf2\x22\x4d\x48\x1f\x71\x3a\x27\xaa\x74\x5a\xdc\x5f\x16\x09\x5e\xbe\x98\x73\x5e\xe4\xf5\xaa\x97\x19\xc1\xb4\x59\x58\x30\x07\x88\xc0\xfe\x3f\x8a\x5c\xa0\xfb\xf1\xf2\x25\x8c\x27\xf7\xc6\x86\x8d\x6d\x08\xe1\x2e\x9a\x8a\x12\x38\x6c\x89\xff\x5e\xa6\x33\xf2\x01\xe8\xd1\x6a\x03\x81\xd6\x81\x91\x76\x78\x0d\x8e\x50\x7c\x49\xa9\xf6\xd1\xa0\xb6\x97\x7b\x74\x88\xc2\xd6\xb7\xad\xe8\xcd\xb8\x09\x62\x5e\x0a\xbc\xce\x65\x73\x0d\xc4\x28\x11\x76\x61\x36\xc9\xc6\x81\x58\xad\x76\x7b\x2f\x95\xda\x00\x8e\x1f\xad\xc3\x56\xcd\xf8\x9a\x15\x82\x9f\x5b\x85\x4c\x36\x6b\xca\x99\x2c\xff\xd3\x62\xd6\x67\xec\xff\x25\x49\x13\x2d\x19\xc7\xb3\xd2\xde\xe8\x12\xb9\x58\x73\x72\x8f\x5e\x35\x84\xca\xf4\x78\x7a\x78\x70\x70\xd0\xae\xc4\xb3\x22\xe0\x5a\xe9\x14\x1f\x52\x16\x11\xc9\x18\x69\xa2\x63\x33\xc7\x91\xfd\x1d\x80\xaf\x07\xe4\x48\xbf\x82\xf4\x4d\xc2\xaf\xcf\xa1\x8c\x2f\x33\x02\xa2\x2b\xcd\xc2\x86\xec\x8a\x46\x69\x5c\x18\x93\xb1\x32\x22\xa5\x40\xb6\xa2\x49\xb6\x2c\xa7\xa2\x49\xcb\xda\xfa\xdd\x45\x11\x36\xb6\xf4\x0a\x0a\x4e\x12\xb5\xf
d\x8f\x78\xde\x2d\x69\x3a\xc3\x74\x19\x18\x73\x5e\x00\xb6\xda\x98\xc1\xba\xf1\x94\xc4\xb7\xb5\x76\x14\x7c\x4e\x8d\xa6\xf3\x1c\x1a\x93\x44\x37\x57\x4c\x5b\x87\x92\x03\xe6\x61\x58\x35\x86\xda\x8c\x99\x33\x89\x95\x3e\xc8\x3b\x4c\x09\x2d\x2d\x63\xe1\x58\x3b\x74\x68\xfa\xfa\x68\x2f\x4e\x52\xd5\x9e\xfb\x6f\x17\xef\xdf\x55\xdc\xe8\xf5\xd0\xe9\xd8\x3a\x7a\xdf\x63\x86\xd4\x28\x1d\x28\x2e\x68\x3a\x49\x73\x9c\x21\x46\x68\x4a\x18\x02\xff\xdc\xa4\xe0\x68\x36\xe7\x98\x93\xa4\x82\x13\x32\xa1\x5a\x92\x36\xb8\x42\xee\x09\xca\x09\x49\x84\x09\x46\x09\x1c\xfa\xe8\x3c\xe6\x28\xe5\xd2\x35\xe2\x40\x16\x18\x01\xdc\xc8\xe6\x87\x72\x04\x4a\xeb\x96\xe2\x9c\x09\x45\xf5\x4a\xac\x9a\xda\x5c\xec\xe3\x5d\x43\xc5\x36\x68\xf1\x33\x6a\x1d\xb4\x50\x5f\x68\x5d\x6d\xaf\xd5\xa9\x6d\x00\x49\x8d\x0f\xae\xab\xb0\x71\x72\x6b\x9c\x40\x1b\xbc\xa8\x1d\x37\x2c\x79\xd1\x86\xae\x35\x96\x3e\x63\x6c\x6e\xe5\xb3\xb4\xd7\xd9\xc8\x6a\xd7\x19\xe3\x8c\x91\x81\xd1\x9b\xb6\xb1\x64\x6c\xc0\xe6\x9c\xe4\x66\x33\x02\xcd\xad\xcf\x65\xf1\x0d\x1c\x2c\xaf\x83\xb6\x47\xfa\xb4\x2d\x1d\x53\x82\x19\x39\x57\x08\xda\x83\x6e\x02\x9e\x90\x1d\x80\x27\xc4\x03\x7c\x57\xd4\x49\x9e\xec\x82\xf8\x49\x9e\x3c\x10\xed\x2d\x80\x35\xd2\x16\xe0\x5d\x51\x96\xfa\x7d\x17\xac\xcf\xa0\xe5\x03\x11\xdf\x0e\x5e\xe3\xee\x82\xf7\x9e\x9b\x3c\x56\x53\xed\x30\x24\xcf\xe5\xa2\x2e\xa0\xa4\x14\xa6\x44\xd0\x41\x5f\x38\x59\xf0\xbe\x07\x1e\x98\x47\x1d\x34\x2b\x84\x4d\x11\x8c\xc8\xb8\xa0\x24\x58\x35\x4e\x58\xfa\xe0\x25\xf4\x0f\x25\xf0\x2d\xcd\x27\xd5\x4a\x95\x4e\x37\xa1\x7a\xe5\xf6\xe6\x31\xa8\xb4\xa7\x40\x34\x52\x56\x94\xe9\xb1\x4e\xcb\x2a\xc3\x11\x2e\x38\x36\xac\x36\xe3\x72\x10\x5a\x5e\x6c\xf3\xaf\x68\x3a\x56\x87\xbc\x5e\x0f\x59\x0e\x76\xe0\x15\x9a\xa6\x8c\x17\x74\xa9\xac\x9b\x47\x60\xab\x5d\xf0\x82\xe2\x09\x89\x26\x84\x9f\x72\x32\x0b\x03\xd5\xa8\x3a\x25\x3b\xcd\x58\xbd\x59\x07\xd4\x6b\xc4\x38\x4d\xf3\x49\x3a\x5e\x86\x57\xd7\x6d\xd7\x8c\x28\x8b\x72\x9e\x61\x4e\x4e\x81\xfe\x78\x94\x11\xc9\x03\xa6\x34\x83\xd9\x52\xac\x53\xac\x4d\x87\x86\xea\x59\xf9\x6f\x44\xaa\x9b\x05\x97\x1e\xeb\xac\x8e\xda\xfd\x82\x2c\x1c\xd1\xe2\x9e\x11\x2a\x3a\xdb\x76\x5d\x5b\xd0\x47\x14\x86\x6d\xd4\x53\xd7\x6e\xe0\xee\x8b\xf0\x67\xbc\x08\xf5\x6e\x87\x90\x40\xa9\x48\xfa\x28\xf8\xdb\xc9\x65\xd0\x31\xc5\x73\x9a\x39\x5e\x77\xb4\x8f\x82\x1e\x2e\xd3\xde\xdd\x61\x0f\x78\xf3\x33\x7c\x0e\x39\x0c\x61\x75\x14\xbb\xd4\xe5\xb2\x14\x42\xfa\x99\x15\xb9\x55\x03\xf4\x99\xc7\x31\x61\xac\x5f\x4d\x50\x34\xea\x80\xbb\x58\x1c\x3e\xe7\xac\xda\x89\x95\xb1\x30\x46\xd0\x46\x6c\x62\x7c\xce\xd0\xa3\xe1\x10\x05\x0a\x4c\x50\x6f\x5c\xb1\x60\x5a\xdc\x9f\x50\x5a\xd0\x30\x80\x3f\x52\x9e\xd2\x7c\x02\xf6\x73\x54\xd9\x22\xd5\x3f\x29\xaf\x6e\xf9\xca\xf9\x26\x79\x40\xef\x0c\xb5\x01\x2f\xd8\xa7\x29\x61\xf3\x8c\x5f\x1d\x5c\x0f\x1a\x3d\x92\x74\x2c\xb8\x76\x86\xf9\x34\xc2\x23\x16\xda\x0c\xeb\x5a\xf0\xa4\x6c\xb9\x13\x87\xbe\xc7\x43\xf4\xfd\x41\x73\xa6\x70\x13\x28\xe6\xf9\xab\xf4\x60\x84\x8d\x19\x21\x14\x1c\x25\xe9\x1d\x8a\xc5\xee\x39\xfc\x14\xe0\x8c\x50\x8e\xe0\xb3\xab\xdc\x1e\x9f\x82\xe3\x23\xc6\x69\x91\x4f\x8e\x15\x98\x47\x47\x3d\x55\x80\x5e\x11\x4e\x62\x4e\x12\x14\xa0\x7d\x0f\x70\x81\x5c\xc4\x8b\xd7\xe9\x82\x24\xe1\xb3\xb6\xb7\x4d\x80\x98\x30\x76\x12\x06\x74\x87\x2e\xf2\x02\x04\x8d\x08\xbf\x27\x24\x47\xcb\x62\x6e\x84\x18\x0c\x25\x61\x09\x49\xaa\x44\xf6\x0d\x33\x25\x99\xb0\xb6\x8a\x1c\xe1\x38\x9e\x53\x71\x8c\x01\x90\xd0\x05\x60\xc3\xd2\x99\x81\x13\x3f\xc6\x73\x46\xd0\x3c\x27\x8b\x52\xce\x40\xaa\x13\xc9\x25\x16\x1d\xf5\x92\xf4\xee\x38\xa8\xe1\xdb\x5e\xc7\xfb\x55\x25\xc3\xe0\x62\xea\x37\x97\xa7\xfe\xe7\x17\x3e\x61\xb5\x78\x65\x4f\x8e\xb1\x5a\x77\x89\x5b\x29\x88\xb5\x
2a\x69\xa7\x9b\xc8\xda\xa2\xf7\x2e\xf9\x4d\x0b\x3e\xc3\x23\x92\xf5\x6e\x6e\xc4\xc6\x70\x73\xd3\xbb\x83\x5b\x5c\xd3\x73\xdd\x8a\x7f\xd8\x5a\x7f\xc0\x3a\xdf\x4c\x64\x7c\x87\xd3\x4c\x50\x08\x49\x8f\x39\x7b\xe4\xae\xf6\xfa\x3a\x5f\x55\xcb\xae\xc4\x13\xf2\xb2\xc8\xc7\xe9\x24\xc2\x59\x56\x51\xd8\xac\x73\xd8\x56\x79\x91\x14\x7d\x94\x14\xc6\xa4\x07\x7c\xaa\x0e\x3f\xa3\xf7\x14\xc5\x38\x17\xb6\xfd\xe7\x39\xe3\x28\x4b\xef\x88\x10\x5c\x21\xd9\x62\x08\x33\xde\xb8\xa0\x28\x84\xf3\x24\x5c\x3e\xa3\x14\x1d\xf9\x71\x88\x32\x92\x4f\xf8\x74\x80\xd2\xfd\x7d\x0f\x2d\x6c\x43\xe1\xea\xe0\xda\x9c\x94\x71\x92\x84\x62\x47\x78\x0f\xdf\x43\x2f\xe8\xab\xf4\xba\xe3\x1f\xf4\x2a\xbd\x6e\xb7\xbd\x74\x82\x41\xc7\xf3\x3f\xfe\x58\x9e\xc3\x8a\x32\x97\xb6\xf2\x1f\x2c\xb6\x3e\x44\x31\x74\x1c\xc2\x8b\xb6\xcd\xf2\x19\x2e\xfb\xe8\xcb\x6a\xed\x40\xc2\x2a\x10\xf2\x85\xa7\x04\xcb\xdb\x55\x83\x95\x59\x99\x9b\xd6\xe5\xb7\x8b\xcb\x4a\x7b\x57\xb6\xac\x4e\x07\x43\x7b\x45\x02\xb2\x80\x8a\xbc\xe6\x63\xc5\x9c\xc6\x62\xe7\x00\x12\xbd\x91\x16\x49\x94\xb2\x93\x5c\x60\x20\x4f\x63\x16\x2f\x4c\x2b\x2d\x06\x71\x91\xc7\x98\xfb\x19\xd9\x46\x7d\x3f\x1f\xdd\xab\x48\x6e\x28\x29\x29\x84\xe7\xbc\xb8\x00\x4b\xb4\x2f\x6d\x35\xe5\x7b\x02\x4c\xfb\xea\xaf\x2c\x4b\x39\x99\xb1\x3e\x18\x13\xb2\x60\x86\x79\x3c\x25\x16\xdd\x51\x28\xda\xd4\x0f\xd3\xf7\x04\x4d\xf1\x1d\x51\x02\x00\x52\x1f\xcf\x29\x25\x39\x97\x74\xe8\x20\x76\x9b\x96\x7b\x95\x1e\xa8\xcb\x97\x24\x04\xa8\x04\xd8\xf5\xe0\x6b\x83\xc5\xcd\x0e\x76\xf3\xc1\xfa\xc6\x33\x5c\x0a\x09\x5e\x6d\x68\x42\xb5\x9c\x43\x61\x34\x4e\x33\x4e\x68\x58\x41\x8f\x94\x05\x1f\xf6\x50\x6f\xd2\x41\x41\xd0\xee\xa8\x0d\x5a\xd2\xcf\x59\x1f\x25\x15\xba\x52\xef\xbb\x8e\x85\x54\x16\x8c\x8b\x3a\xbd\x07\x57\x7b\xd4\xaa\xbd\x15\xbd\x68\x5c\xd0\x13\x1c\x4f\x2b\xf3\x9c\x7a\x94\x45\x6d\xe6\x57\x34\xd2\x7e\x87\x6b\x34\x44\x74\xe0\x19\xd1\xac\x48\x65\xd3\x0b\x26\xa3\x34\xf7\xc2\x53\xce\x87\xce\x9e\x12\x23\xca\x9b\x02\x62\x29\x7e\xf8\x1a\x89\x66\x15\xd6\xb8\x33\xb2\xf1\xd6\x0a\xd2\x8b\xfd\xe8\x3a\x62\x71\x41\xa5\x29\xe5\xa9\xc7\xaa\xbe\x9a\x96\x9e\x03\xb8\x9a\x0f\xd0\xcf\x08\x47\xd2\x07\xfc\xb2\x98\x95\x98\x92\x70\x24\x56\x52\x6a\xe6\x6e\xa8\x60\x4d\x9e\xb9\x97\x6e\x20\xe8\x97\xd3\x94\xc1\x7e\x00\xb1\x1d\x53\x08\x06\x41\x78\xcc\x85\x59\xc3\x39\x8e\xa7\x60\x01\x4c\x09\x32\x2b\x10\x95\xd9\x7c\x92\xe6\x1d\x84\x19\x4a\xb9\x84\x52\xf0\x29\xa1\xf7\x29\x23\x68\x44\x09\xbe\x65\xb5\x1e\x9a\x46\x38\x4b\xf9\x32\xda\x5b\x73\xe5\xe6\x68\x97\x51\x9a\x27\xea\xff\x27\x77\x24\xe7\x4c\xab\xd0\xd5\x46\x9d\x36\x21\xfc\xbd\x09\xc9\xd9\x6e\x62\xd4\x42\x78\x56\x03\x37\xae\x07\x2e\xea\x75\xe0\x17\x42\x81\x75\x21\xaf\xe4\x3f\x30\x37\x1a\xba\x80\x71\x52\xd6\x4b\xc0\x69\xa5\xbf\xda\x8e\x60\xb1\x50\xae\xd7\x7b\x12\x64\x9b\x76\x44\x9c\xe5\x01\x17\xb7\x1d\x1d\x73\x63\x1f\xb5\x84\xa5\x53\xc5\x0f\x46\xe2\xab\x75\x8b\x1b\xa5\xf9\x73\x4a\xf1\x32\x14\xe5\x1d\x67\x8a\x6d\x61\xae\x5b\xd6\x3a\x04\x72\x28\x28\x60\x37\xa9\xad\x1c\x1d\x23\xc7\xa6\x57\xb4\x83\xb3\xf7\xb5\x35\x32\xf4\xa9\xd6\xa1\x2b\x75\xe6\x32\x48\xc6\xa6\xd4\xce\xa4\x76\x0b\x79\x71\x5d\xbf\xcb\x96\x47\x7b\x90\x6a\x13\xf6\xb8\xcd\x10\xc5\x94\x91\x57\xc2\xfe\x4e\x0b\xc7\x3d\x0e\x1c\xbd\x24\x0b\x5e\x89\x08\x14\x9d\x9f\xa8\x63\xe9\x39\x99\x9c\x2c\xca\x30\xf8\xcf\xf0\xea\xa0\xfb\xd3\xf5\x7e\x3b\xbc\x5a\xde\x27\xd3\x19\xbb\xde\x6f\x3f\x96\x7b\xae\xe8\x24\xf7\x14\x21\x2a\x06\x62\x04\x65\xa1\x02\x67\x2e\x3b\x1e\xa9\xa6\x32\x3c\x05\x8c\x3a\xa0\x8d\xa8\x53\x55\x9a\xd8\x8f\x86\xe8\xfb\xda\xdd\xd3\x8f\x07\xfa\xcc\x2f\x46\x05\x32\xa3\x21\x82\xe9\x9d\xe6\x5c\x03\xb8\x3a\xbc\x36\x98\xcd\xf3\x54\xec\x00\xba\xe6\xd9\xb5\x45\x3e\xd9\xff\x69\
x33\xd2\xcf\x8a\xc3\xbc\x12\x00\xae\x77\x30\x26\x2c\xa7\xde\xce\x6b\x0f\x88\x73\xa1\xce\x5a\x8a\xd3\x0e\xaf\xc2\x5a\xd4\x8c\x75\xfb\xee\x33\x43\x37\x84\x6f\xfa\x8c\x51\x41\x73\x07\x85\x23\x1f\x0a\x1b\x80\x82\xb1\xe9\xde\x3f\xd4\x70\xdd\xd2\x79\x50\xb7\x1f\x9a\xce\x19\xb4\xc1\x37\x5c\x9d\xf7\xec\xf3\xc1\x6a\x17\xe7\x8d\xe3\x85\xfd\xaf\x67\xd8\x76\x4e\xa1\x2e\x3a\x14\x5c\x3d\x96\xdc\xed\x76\xd7\x72\xed\xf8\xbf\x0f\xd7\x26\x84\x9f\x98\xd8\x85\xed\x2c\x03\x85\xe3\x44\x3c\x7c\xfd\x8a\x9c\x02\x17\x6b\xaa\x23\x70\x94\xa3\x58\xe9\x1a\xfb\xe2\x7b\x97\x3b\xff\xed\x67\x0f\xb1\x4f\xd3\x8b\x87\x4d\xc6\xba\x08\x96\x57\x2d\xa6\xbb\x15\xff\xc2\xaa\x42\x73\xb7\xab\xd0\x4f\x20\xb2\x7f\x0b\x62\xcc\x8b\x13\x80\xda\x18\x31\xbd\x0b\x59\x14\x42\x3b\x6a\xd2\x93\xdc\x13\x84\xb0\x86\x2c\x39\xb9\x57\x28\x2b\xd6\x69\x02\xd9\x44\x56\xcb\x50\xb5\x85\x63\xf6\xce\xeb\x17\xf5\xd0\xb3\x0e\x6a\x29\xb7\x58\xcb\x4b\x6f\x05\xd8\xaa\x73\x45\x7f\x47\x85\xf4\xaf\x9e\x37\x9b\x8f\x38\xc5\x31\xff\xbf\x6a\xf2\x13\xc2\xcf\x74\xb4\xc8\x43\x96\xb5\x0a\x31\x31\xab\x5a\xc5\x12\x3c\x74\x51\xef\x10\xcc\xb0\xfb\x9a\x7e\xc0\x44\x3c\x4b\xfa\xcc\x42\x53\x13\x59\x95\x7d\xeb\x82\x6e\x22\xb4\x7d\x3d\xef\x1e\x3b\xb2\xe3\x72\x7e\x20\x55\x36\x4b\xb6\x26\x52\x63\x41\x1f\x1e\xac\x13\x54\xd5\xe5\x9f\xb4\x48\xff\xf5\xb3\x31\xcb\xf4\x5f\x3d\x25\xab\xf5\xee\x0f\x64\xe2\x8c\x60\x2a\x3d\x73\x6d\xb7\x50\xdf\x6b\xb4\x6b\xfb\x6f\xc3\x42\xa8\xf6\xfe\xd5\x5e\xfd\x42\x9f\x4d\x8b\xfb\xd0\x13\xe4\x18\x91\x59\xc9\x97\x61\x75\x29\xca\x38\xa6\x7c\xc3\x2d\xda\x3f\xc3\x6e\x53\x2f\x1e\x8a\x6c\xae\x4e\x4f\xe6\xb8\xb1\xfe\xd0\xaa\xc3\xc8\xf5\x59\xf8\x3a\x68\xeb\xd9\x7f\xfd\x2a\x6f\x95\x66\x78\x11\xc2\x7f\xc6\x59\x51\x50\xd7\xa2\xeb\xa1\x67\x3f\x1c\xb4\x3b\xe8\xd0\x20\x50\x45\x6c\x36\xf6\x7e\xeb\xa6\x10\x8e\x62\x5a\x34\xeb\x1a\xc5\xb4\xb3\x2f\x43\x01\xfb\xdf\xa6\xd4\xb9\x0a\xd5\x85\x11\x1e\x15\xd4\x52\x99\x70\x9e\xa2\x99\x1e\x49\xf9\xf9\xf5\xd7\x12\x53\x3c\xab\x9e\x02\x05\x00\x25\xe8\xd7\x0f\xb8\x3a\x3a\x68\xed\x3b\x26\x73\xc2\x96\x00\x23\xe0\xb1\x38\x5c\x2b\x12\x74\x1d\x6e\x0e\xec\xa6\x32\xd0\x56\x35\x1c\xb8\x40\x48\x29\x4e\xa7\x86\x8f\xb2\x76\x4e\x33\x61\x8c\x6f\xb8\x31\x95\xf1\x1c\x81\xba\xb1\x97\x33\xb6\xd7\x89\xe7\x76\xc4\x8e\x71\x81\xd5\x76\x4e\x58\x59\xe4\x8c\x34\x1b\x0f\x24\x2d\x9c\x40\x2e\x85\x31\x97\x52\x2d\xd9\xb9\x2b\xb6\xdf\x8c\xe7\x4b\x19\xdf\xb3\x1d\x53\x73\xdd\xae\xb9\x2d\xff\x53\x73\xe2\xfc\x36\xa5\x68\xb8\xee\x06\xab\xb6\x6c\x64\x3c\xbd\xac\x0c\xda\xce\xcd\xd6\x9c\x66\xdb\xee\xab\x44\x79\x5f\x21\xf1\x5f\x7d\x87\x05\xbd\xe0\x12\x63\xd7\xbb\x2a\xc7\xc7\x27\x29\xaf\xbe\xb9\x57\x24\x66\x0e\xa1\xb9\xc5\x72\xb9\xb1\xed\x2a\x65\x31\xa5\x1d\x21\xed\x65\x7d\xa6\xa2\x0c\x3d\x1a\xa2\x00\xd6\x76\x6d\x7e\xa0\x69\x28\xb5\x27\x23\xfa\x2c\xa6\x34\xa2\x4a\x32\x20\xca\xed\x91\xef\xb9\xa1\xfe\x47\xa8\xe0\x7d\xbd\x8f\xa4\x93\x0d\xb9\x16\xbd\x58\xef\x2c\xb9\x71\x49\x16\xdc\xe9\xb4\xf5\x46\x91\x2c\x48\x3c\x87\x57\x79\xea\x46\x2b\x40\xfb\x02\x6c\xe3\x02\xd7\xa2\x5e\x5c\xcc\xca\x8c\x70\xb2\x33\x01\x87\x6b\x08\xd8\x64\xbd\x4d\xda\xa4\xf2\xc0\x79\x43\x3e\xba\xd5\x7e\x36\x70\x3a\xf2\x82\xe3\x4c\x14\x5f\xc8\xe8\x45\x78\xf4\xba\x89\x43\x32\xec\x70\x03\x9b\xd6\x76\x52\xb7\x12\x62\xa9\x81\x36\x0e\x58\x8c\x33\x4c\x1b\x41\x1b\x4d\x94\x0e\x3d\xcc\x4d\xc7\x1b\x47\x01\x0c\xf3\x79\x96\x6d\x87\xbe\x09\x8c\xf6\x6d\x79\xe5\x64\xe5\xfa\x23\x2a\x5b\x62\xca\x67\x59\x18\xbc\x2d\xb0\x0c\x45\x90\x82\x62\x58\xb4\x8f\x82\x19\x43\x47\x23\x8a\x7a\xc7\xe8\xdc\x6c\x1b\xb2\x95\x65\x0e\xec\xa3\x40\x37\x13\x35\xc1\xa5\xc0\x5c\xc6\x36\xc8\x50\x53\xd9\xa3\x36\xa1\xfa\xb5\x50\x3d\x92\xb1\x42\x7d\x87\x8b\x4c\xb3\x04
\x6c\x7d\x3f\x63\x93\x2d\x27\x76\xd1\x23\x12\x3a\x05\xda\xd6\xca\xb5\x05\xb6\x2d\x04\xca\xd8\x81\xbb\xdb\x8e\xd6\xc0\xad\x56\x7d\x5c\x4d\x80\x1d\xa6\xfc\xab\x79\x21\xb4\xfb\xa4\x55\x00\x8d\xe4\xbd\x33\x6d\x5d\xf3\x90\x89\x7b\x30\x78\xc0\xf0\xf6\xe4\x4d\xc5\x6e\xd3\x77\xde\x70\xec\x30\xbc\x6d\x6e\x09\xd1\x2c\xe6\xfc\xf4\x95\x5e\x73\xf7\x69\x9e\x14\xf7\x72\x46\x97\xb2\xb2\xde\xd2\x58\xe9\x69\xed\xd5\xa2\xcf\x86\xae\x3d\x44\xa9\x0c\x69\x38\x0d\x68\x08\xae\xff\xdf\xbc\xff\xd3\x43\xa2\xa1\xc6\x8b\x49\xf5\x28\xb0\xf2\x07\x30\x7a\x3c\x8c\x8c\x78\x1e\xba\x88\x39\x74\xaa\x19\x3c\x55\xf9\x2d\xb6\x53\x5b\x3e\x26\x7f\x8b\x47\x24\x73\x4c\x2a\x08\xb0\x61\x15\xc9\xe1\xfb\x05\x44\x28\x32\x95\xfb\xc1\xf2\xfa\x42\x2d\x4a\x73\x64\x77\x93\x44\x91\x55\x62\x53\xd6\xd1\x3a\x96\xba\xb5\xa1\x46\xe5\x9c\x4d\xc3\xea\x12\x1a\xed\x2b\xb0\xfb\xd6\xed\xb3\xda\xf1\x58\x8c\x4b\xf2\xe6\xf2\xec\xad\xc2\xf3\x0a\xfe\x98\xe0\x90\x95\xeb\x06\xc9\xf4\xec\xdc\xe0\x33\x59\xfc\x29\xa8\x86\xd2\x98\x7c\x2e\xd2\x3c\x0c\x8e\x46\xf4\x38\x68\xcb\xe1\x21\x3a\x6b\x2b\x31\x65\xbc\xc6\x65\x71\xc9\xde\xc9\xdb\xba\xb5\xe4\xe4\xba\x85\xaa\x89\x34\x71\xc4\x11\xaa\xd5\x82\x51\xbf\x04\x83\x4d\xc4\xdf\x4a\xfd\xed\xe4\xf7\xd0\xdf\x90\x7c\xf8\x29\x30\x74\xd1\xf4\x15\xe5\x9f\x02\x13\x8d\x02\xbb\x8f\xf8\x50\xb3\xd9\x1f\xfa\xc8\xd8\x91\x34\x5c\x05\x96\x77\x47\x76\xd8\xed\x6a\xef\x17\x75\x11\x66\x68\x09\x37\x5b\x15\x29\xe5\x8a\x85\xa6\xaf\xb3\x02\x73\x55\xaf\x17\x65\xca\xde\xe1\x77\xa2\xcc\x1c\xce\x7b\x3d\x14\xec\x9f\xe6\xe3\xa0\x83\x82\xae\xfa\x0b\xdf\xd1\x7d\x9a\x65\x68\x44\x24\xb0\x44\x2c\xa7\x02\xbd\xc3\xef\xd0\x68\x69\xc3\x6f\x47\xe8\x72\x4a\x34\xa8\x18\xe7\x2d\x2e\x3a\x41\xdc\x32\x49\x3a\x88\x15\xf0\x4e\x0a\xf1\x29\x99\x21\xcc\xd0\x04\x97\x0c\x85\x60\x09\x44\xb6\xd3\x4e\xe7\x54\x59\x39\x97\x76\x5b\x89\xe2\x3c\xb4\xa8\x9f\x82\x36\x7a\x6a\x4a\x9c\x11\x6e\x1e\x5b\x9d\xab\x14\x2f\xd1\xcb\x22\x2b\x68\xf4\x41\x56\x56\xbe\x0d\x30\xce\x2d\x83\x49\xc8\xd0\x0c\x73\x9a\x2e\x02\x57\x45\x55\x46\xaa\x8a\xbd\x4a\x19\xca\x0b\x8e\x8a\x31\x92\xed\x21\x5a\xe0\x11\xfa\x90\x11\xcc\x88\xca\x1c\x80\x51\x5c\x50\x4a\x62\x0e\xcf\x7f\x09\x63\x69\x91\x9b\x40\x44\x45\x0d\x29\xe7\xab\xca\x97\x88\x75\xe4\x1b\x35\xe1\x14\x95\xde\xe4\xac\x7e\x5d\x3e\x30\xdf\xa4\x14\x57\xf7\xe5\x9c\xa9\xb5\x0a\x66\xa0\x6b\xa4\xa9\x8b\x76\x6d\x1b\x0e\x6c\x55\xc5\xac\xa8\x9c\x9a\x89\xaf\xef\xe7\x2b\xd5\x24\x7d\x8d\x8e\x4a\xa8\x06\xae\xe2\xd8\x0c\x60\x53\x57\x29\x31\x43\x0a\x7b\x94\x3e\x7c\x76\x9c\xee\x7d\xf5\xd7\x3d\x39\x72\x26\x6f\xeb\x99\x4b\x29\x6b\x01\xc9\x7f\xb5\x41\xc4\xbf\x45\x5f\xde\x20\x5f\x1d\x5c\xdb\xd1\x40\xcb\xbe\xb5\x37\xc2\xca\x94\xd0\xae\x0e\xaf\xdb\x95\x55\x5a\x45\xaa\x54\x87\x90\x4c\x1c\xe1\x94\x04\x46\xf0\x35\x94\x3d\xe4\xd1\x1b\xc8\x01\x66\x6f\x23\x64\x88\x59\x0b\x57\x46\x9b\x02\xc7\x18\x28\x40\x9c\x65\x68\x96\x32\x26\x4c\x15\xc6\x49\xc9\xa2\x4a\x04\xc8\xbd\xb1\xb0\x95\xca\x94\xcb\xa0\xb0\x0e\x19\x46\x89\x72\x6b\xdb\x37\x9e\x99\x01\xe2\xe8\xc8\x2d\x27\x79\x22\x4a\xf7\xeb\xad\x49\xe9\x04\x99\x3d\xcf\xb2\xe2\x1e\xa0\x8f\x85\xd2\x10\xe8\x95\x45\x9a\x73\x94\xe6\x32\x5a\x38\x5e\x46\xf6\x4d\xa3\x34\xf9\x4d\x24\x86\xc0\xf1\xc9\x13\x24\x8b\xaf\xca\x82\x5d\x47\x0b\x74\x24\xc6\x6d\x0c\x2b\x5d\x5f\x36\x3b\xcd\xc4\xa5\x4a\xb7\x80\x58\xa6\x79\x59\x40\xaa\x1f\xc5\xa8\xfa\x71\xb5\x06\xe2\xcb\xa2\x8f\x78\x07\xa9\x10\xca\x55\xbb\x19\xfe\x81\x90\xc9\x0b\x65\xfa\x56\x8c\xad\x9c\xfa\x78\x47\xfb\xaf\x91\x74\x6b\xe3\x85\x09\xd0\xc4\xa6\xa0\xf6\xb5\xb9\x66\x18\xe4\x21\x80\x94\x58\x38\x5f\x22\x4e\x71\x4c\x98\x50\x53\x38\x47\x64\x91\xca\x74\x37\xa0\xc6\x23\xf7\x81\x7b\xe5\x9a\xb5\x86\xab\x5e\xc7\xc7\xd3\x3
4\x4b\x28\xc9\xc3\xb6\x27\x94\xa6\x6a\x5b\x7b\x8d\x02\x15\xf0\xde\xde\xa9\x58\xd5\x1f\xee\x3f\x0e\x5b\x96\xd9\x12\xc8\x17\xfb\xc7\xd2\x24\x69\x35\x5f\xee\xd7\x9a\xab\x27\xfb\xcd\xf6\x15\xfa\x8d\x24\x41\xdb\x1a\xc1\x50\x95\x9f\x9a\xe4\x89\xf2\x52\xaf\x75\xdf\x0a\xca\xbf\x2c\xf2\x3b\xb1\x76\x79\x81\x3e\xbe\x3b\xfd\x0d\x99\xa7\xbd\x3a\x49\x90\xe5\x41\xd8\xfd\xfa\xee\xeb\x57\xf4\xfd\x8f\x6a\x84\xc3\xa9\xce\x57\x15\x79\x5c\xe8\x1a\xcd\xae\x19\xc8\x4c\x73\xbb\xde\xf9\x80\x13\x88\xcd\x55\x2f\x2c\xef\x53\x3e\x45\x69\x7e\x97\xb2\x74\x94\x11\x14\x88\x55\x11\x48\x85\xc9\x10\xe6\x10\x22\x17\x43\xd4\xeb\x9c\x92\x04\x2d\xba\x82\x09\x68\x54\xcc\xf3\x04\x03\x00\x92\xb3\x39\x25\x4c\x83\xe7\x53\xcc\xa5\xe4\x31\x84\x29\x41\x49\xca\xca\x0c\x2f\x49\x22\x47\xc2\x68\x9c\x2e\x2a\x38\x40\x05\x27\x63\x46\x8e\xcb\x12\x82\xf9\x0a\x18\xda\x44\x10\x1b\xf8\x62\xe2\xba\x1b\x34\xa9\xde\x72\x56\xea\xe7\xea\x40\x68\x99\xe3\x8a\x6a\x56\x20\x85\xa4\xd1\x3c\x87\x5c\x3f\xa0\x0f\x4c\xab\x86\x5e\x58\xd5\xe1\xba\xda\xad\x8b\x0e\xa5\x36\x53\x1c\x69\x8c\x62\x54\x8e\x6a\xe0\x1d\xa0\x4a\x2d\xf2\xae\xb8\x47\x31\x25\xf0\xfe\x62\x4a\xc0\xb6\x71\x17\x71\x23\x99\x9d\x6d\xfd\xc8\xa7\xa3\x12\x03\x15\xe2\xd6\xb7\x84\xdf\xec\x7f\x32\x3d\x53\xbf\xba\xdf\xb0\x16\x36\xf8\x37\x64\xb6\xa6\xb0\xdd\x01\x75\xdc\x51\xc7\xcf\x84\x4f\x37\xf4\xf9\x55\xd4\x83\x73\xec\xaf\x07\x1d\xf4\xcc\xf4\x93\xa7\x32\x42\xfb\x9e\x97\xc2\x3f\xab\xa8\xc3\x00\xf5\x51\x90\xa5\x39\xd1\x7e\x65\x38\xfd\x95\x45\x86\x95\x2f\x47\xd4\x61\xaa\x9c\xc9\xda\x5f\x63\xe4\x5d\xc5\x4b\xa7\xa2\x25\x9e\xf3\x22\xe8\x38\x44\x7d\x9d\xe6\x09\xbc\x64\x61\x44\x49\x66\x8b\xa1\x19\x5e\xf4\x66\x69\xbe\xb7\xe6\x0d\xb3\x50\xba\x9c\x56\xa6\x45\xaf\x87\x7e\x9d\x92\x5c\x3f\x56\x16\x76\xa1\xcc\xa4\x93\x98\xbd\x78\x86\x17\xd5\x5e\xbc\x61\x2d\xf2\xca\xbb\x64\xa4\x45\xf4\x8f\xe7\x94\xca\xf2\x33\x1b\x92\x4c\x4a\xa0\x76\x30\x3f\x44\x51\xfa\x41\xec\xc8\x75\x1f\xa8\xa9\x88\x96\xe8\xb8\x36\xc0\x93\x27\xc8\xae\x7e\xe4\x73\xf0\xd5\x51\xb2\x3a\x78\xbc\xb4\x66\x2b\x15\x94\xd8\x1f\xba\xbd\x95\xb4\xdb\x1b\x86\x23\xcb\x91\x24\xdf\x0c\x2f\x9e\x1e\x46\x07\x3f\xac\x6f\x96\xe6\x9a\x36\xce\x4e\x0f\x1c\x80\xba\xd3\x7c\x9c\xe6\x29\x5f\x0e\x6a\x9c\xe9\xba\x15\x0f\xe4\xd0\x3f\x87\x09\x47\x80\xe3\x2e\xa4\x97\x73\xd9\x48\x70\x1f\x8f\x67\x3b\x72\x76\xb6\x3b\x3f\x57\x56\x9e\x05\xc0\x6a\x08\x6c\xaa\x47\xa6\xf9\x99\x89\xf6\x2b\x7f\xf3\x5a\x6e\x8a\xcf\xae\x6e\xe7\x4b\x96\xb0\x1e\x78\x78\x10\x1d\x3e\x0d\xcd\xf3\x3f\x51\xd8\x15\xf0\xda\xd5\xa1\x64\xcb\xb0\x5b\x21\xac\xb4\x53\x4d\x88\xd2\x42\x99\x26\x4d\xbd\x1b\x81\xf9\x03\x37\x04\x5f\xa4\x96\xe9\xfb\x54\xb6\xf5\x52\x78\xb9\x05\xd6\xef\x4a\x95\xaf\x05\x26\xf5\x5e\x41\x53\x92\x73\xa3\x29\xc9\x58\x47\x74\xf3\x34\xbe\x7d\xad\xd2\xad\xc0\x73\x09\x99\x7b\xe5\xdf\xcf\x5e\x5c\x76\x3c\x7b\x04\xa0\xa3\xf6\x08\xfb\x39\xb1\x4b\x3a\x95\xb1\xb1\x9a\xc5\xb4\xb8\x23\xf4\x15\xe1\x38\xcd\xfc\x73\x79\x53\x35\xd8\x6d\x42\x12\x4d\xe7\x65\x43\x28\x75\x7e\x07\x2d\x3a\x68\xe9\xaa\x4d\x15\x97\xd3\x3a\x62\x25\xce\xb5\xa9\x28\x0a\x83\xe3\x16\xda\xaf\x2e\x70\x16\xe8\x29\x18\x70\xed\x88\x17\x1f\x2f\x5f\x4a\xc7\x4e\xd8\x46\xfb\xa8\x75\xd4\x13\x7d\x8f\x5b\x03\x0b\x2c\xbb\xc7\x3c\x9e\x36\x01\xc3\x3c\x6e\x64\x6d\x20\x93\x39\x0c\x83\x11\x8e\x6f\x27\x54\x98\x44\x5d\x75\x3a\x6c\xc1\xe9\x06\xd4\x05\x94\x88\x61\x84\xe5\xda\x1c\x28\x2e\x72\xae\x02\x01\xe4\x90\xfb\x48\xcd\x36\xf2\xf9\xd3\xc0\x30\x93\x4e\xb5\x3e\xb2\x1d\x8c\x4b\x35\x13\x59\x62\x86\xb0\x62\x90\xa0\xc1\x88\x02\x59\xf4\xa8\x56\x91\xf2\x0a\x57\x3e\x54\x17\x8d\xa6\xbd\x02\xde\x08\x9d\x35\xcb\xc3\xf8\xb7\x50\xe7\xb5\x47\x64\x37\x63\x90\x6c\x14\x08\x6b\xb4\x69\x3a\x99\x
66\xc2\x34\x81\xcc\x5b\x9e\x21\x5f\x90\x29\xbe\x4b\x0b\x1a\x29\x55\xfd\x46\x77\x08\xd1\x4e\xa2\x27\xf1\xea\xab\xbf\xee\xe0\x6c\x4a\xb2\x3b\x79\x8d\xb0\xc3\xc8\x97\x60\x1d\xec\x26\xf0\xeb\x46\xb5\xef\xfd\x4d\x0e\x8e\xad\x4e\x70\x96\xfe\xf1\x2d\x47\x4e\x57\x4d\xd5\x2f\xfc\x3c\x9a\xc0\x1c\x0a\x4c\xe0\xc0\xb7\x9a\x88\x1b\xac\x82\x4a\xdd\xec\x10\x75\xec\x09\xe5\xd8\x12\x5a\xe1\xa7\x89\x38\x5b\x2b\x2c\x54\xde\x19\x86\x4a\xcc\x18\xbc\x46\xad\xd2\xd2\x8c\x0b\x6a\xec\x41\x79\xe0\x01\x87\xa9\x95\x8b\x86\xe1\x3b\xb2\xa7\x4e\x45\x56\x06\x9a\xe7\xff\xf6\xfc\x37\xa4\x2f\x49\xc5\x29\xa6\xa0\x09\xa1\x32\x79\x4d\xd7\xf8\x44\x51\xca\xa5\xdb\xd6\x1a\x53\x02\xbb\x17\x96\xa8\x80\x38\x67\x84\x8a\x03\x96\x38\x1f\xc9\xc7\x4c\x80\x8f\x9d\x6e\xd0\x24\xae\x51\xfe\x46\xe7\xa0\xe8\x4f\x78\x03\xce\xd7\xad\xee\x08\xaf\xd7\xf4\x5d\x01\x68\x82\x7b\x88\xa1\xb1\xd0\x88\x35\x4f\x68\xd3\x2f\x70\x89\x47\x6e\xbe\x22\x3b\x11\x8d\x75\x43\x64\x12\xe3\xec\x24\x05\xb5\x40\x99\x5a\x54\x25\xde\x49\x0e\x64\xb4\x5c\x95\x51\x67\x33\x96\x36\xa5\xa5\x3f\x5c\x5f\x90\xbc\x28\x92\xa5\x26\xb5\x05\xce\x4d\x00\x7a\x03\x0f\xcf\x11\x1f\x15\x89\xca\xfc\x04\xfd\x9c\x50\x3a\x76\x9f\xf2\x78\x1a\xd6\xee\xff\xd5\x1b\x58\xcc\x08\x0a\xee\x48\xcc\x0b\x1a\xf4\xf7\x6c\xf3\xd0\x3c\xb0\x54\x8b\x5a\x6c\x24\xcd\x6b\x79\x97\xb3\x7a\x78\xe5\x2c\x09\x8e\x38\x3d\x3e\xe2\x09\x8a\x8b\x4c\xec\x61\xc3\xd6\xb3\xd6\xf1\x51\x7a\x9c\x4b\x86\x1f\xf5\xd2\xe3\xa3\x1e\x4f\xc4\x07\x3d\x0e\x6a\xaf\xfa\xec\xa3\xb4\x83\x8f\xda\x0c\xd2\x59\xca\xa5\xda\xb4\xd1\x6d\xd7\xbc\x9a\xd6\xfb\x19\x4f\x4c\x81\xfb\x78\x1b\x98\xa9\x0c\x5c\x9d\x3e\x22\xbd\xb6\xb7\x5d\x73\x6b\xe5\x73\x6d\x1b\xcf\xf6\x60\x13\x2d\x8e\x6b\xf7\x77\x12\xa4\xba\x65\x13\xb4\x50\x4d\x94\xe7\xfa\xea\xf0\xba\xaa\xb2\xc9\x24\x09\x03\x2f\x12\x07\x86\x91\xea\x7a\xc2\xcb\xc8\xff\x4f\x19\x76\xf7\xed\x0c\xbb\xab\x33\xcc\x3c\x3b\xbb\x24\x0b\x81\x77\x60\x2e\x3f\x0c\x7a\x9f\x25\x7a\x9f\xd1\x11\xba\xd3\x77\x0b\x1a\xb7\xcf\x6e\x26\x80\x0a\xd2\xfe\xd0\x34\xbe\xfa\x7c\xad\x58\x8a\xfe\xa7\x60\xb3\x5d\x7e\x20\x59\x3d\xa2\xbd\xe3\xc0\x75\x30\xff\x49\x59\xb2\x30\xd9\x59\x94\xd4\xed\x8f\x14\x25\xff\xe8\xb2\x89\x33\x92\xcd\x89\x75\x92\x5b\x1f\x08\x6c\xea\xcd\x03\x41\x13\x67\x20\x6b\xd6\xee\x98\xed\x2d\x83\x2a\x07\x69\xdf\xbb\x13\x7d\xcc\xd9\xbc\x2c\x0b\xca\x49\xa2\xde\x0f\xc2\xcd\x5d\x03\xc8\xea\x9b\xed\x2c\xff\xef\x51\xf8\x12\x84\xd4\x93\xd6\x3b\x4e\x72\x6b\xf0\x73\x7f\xf1\xce\x38\x55\xe7\x3b\x1b\xaf\x65\x85\x18\x1e\xb1\x9b\xa5\x9d\x2e\x67\x69\xf6\x79\x59\x75\x3c\x44\x87\xe4\xd9\x5f\x6a\x4f\x32\xc2\x25\xea\xc9\xf2\x88\x17\xd6\xc1\x29\xf8\x3d\x18\xb8\x39\x29\x6d\x28\x87\x6b\xa0\x1c\xd6\xa1\xfc\xc7\x06\x28\x87\x7f\xf5\x43\x39\xfc\x6b\x1d\xca\xc9\x26\x28\x3f\xac\x81\xf2\x43\x1d\xca\x87\x4d\x50\x9e\xad\x81\xf2\xac\x0e\xe5\x72\x03\x94\x9f\xfc\x40\x7e\xaa\xc3\xf8\xdb\x06\x18\x3f\xfa\x61\xfc\x58\x87\x71\xb6\x01\x46\xfd\xc9\xae\x82\xf1\x7d\x1d\xc6\xed\x7a\x18\x35\x08\x4b\x5f\x3b\x67\x8f\xda\xd4\xf0\x48\x20\xd5\x5d\x27\x7b\xdd\xa6\xf0\x2d\xfd\x88\x29\x38\x6b\xa4\xaf\xdb\x14\xbf\x3f\x36\xc1\x59\x27\x7f\xdd\xa6\x00\xe2\x8d\x70\xd6\x48\x60\xb7\x29\x82\xe3\x8d\x70\xd6\xc8\x60\xb7\x29\x84\xe5\x26\x38\x3f\x55\xdb\x5b\x0d\x50\x43\x10\xf3\x4d\x70\xd6\x48\x62\xb7\x21\x8a\xff\xfb\x7f\xad\x03\x73\x48\xba\x6b\x64\xb1\xdb\x10\xc6\xd9\x7a\x5c\x7c\x32\xb6\x25\x0b\x83\x65\xc7\x38\x8f\xed\xa5\x35\xb3\x29\xfc\xe4\xec\xf9\x6f\x37\x17\x27\xe7\xa7\x27\x17\x37\xef\x3e\x9e\xa9\x9f\x5e\xaa\x9e\x76\x10\xc6\x30\xbc\x56\x0e\xd6\xe7\xff\x7a\x4d\x78\x3c\xb5\xd2\x7f\xb9\xa6\xdd\x3e\x24\xf7\x92\x3e\xaf\xd1\x9c\xeb\x5b\xb4\x34\x9f\xa0\x22\xcf\x96\x68\x9c\x52\xc6\x4d\xdf\x1a\x3a\xfb\
x28\x88\x02\x13\x4d\xe8\x02\x3e\xae\x35\x6e\x9c\xe4\xf4\xc3\x20\x35\x09\x37\x8c\x45\xc1\x62\x65\x96\xc6\x24\x3c\xe8\xd4\x81\xd5\x62\x80\x64\x73\x48\x86\xa1\x7e\x94\xea\x03\x9e\x90\xbd\xa7\xbd\x3d\x99\xb1\xa7\xca\xa8\xa3\x9e\xa3\x54\x79\x75\xfa\xe8\x0a\x9c\x43\xf2\x60\xad\xbf\xd5\x73\xf6\xf4\x65\xba\x41\xb8\x5e\x0d\xbd\x09\x0c\x5b\x2a\x33\x61\x4b\x5e\xb0\x0a\x30\xb5\x5d\xf4\x03\x6e\x3c\x2c\xf7\xb4\xa8\x25\x24\xf2\x6f\xfc\x80\x6c\x95\xd8\x03\x9c\xae\x80\xdb\xc7\xf3\xb7\xd5\x95\xbb\xdd\xca\x6b\xca\x3b\x0d\xe4\x0d\xe2\xaa\x8a\xed\x74\x6a\xf5\x35\x04\x0c\x85\x93\x44\x7a\x95\x90\xf9\xf1\x8f\xc7\x61\xf0\x1d\x4e\x92\x1b\x95\x1c\x5f\x65\x98\x74\x5a\xcb\x1f\x21\x10\x45\x1d\xf4\x65\xd5\x6e\x9a\x19\xb5\xe9\xeb\x09\x35\x49\x20\x26\xa7\xa2\x41\x05\x2f\x44\x4d\xc4\x08\xa6\xf2\x67\x56\x82\xa0\xb6\x44\x75\x4c\x94\x22\x1e\xf0\xf6\x83\x7e\xae\xe2\x87\x13\xb1\xf9\x48\x9a\x90\xe1\x61\x1b\xc4\x90\x87\xad\x27\x2d\xf3\xfc\xaa\x82\xf1\x86\x64\xa5\xf1\x12\xd6\x27\xf3\xf7\x5a\xb3\xd0\x8e\xec\xa8\xc3\x90\x13\xae\xba\xb0\xd0\xc2\x74\x2b\xb5\x34\x95\x6d\x6a\xe9\xdf\x37\x72\xe5\xa6\x89\xab\xf4\x78\x00\xc9\x1e\x9b\xdf\x16\xb2\x7e\xf6\x43\xf9\xff\xd5\x2f\x2f\x49\xab\x5b\x70\x56\xfa\x4b\x3e\x9e\xbf\xad\x58\xdb\xb6\xaa\xa5\xf5\x59\xe3\x7d\x7b\x0f\x72\xf4\xed\x39\x79\xda\xe4\xe2\x93\x22\xa8\xc3\x29\x40\xa8\x24\x77\xda\xca\xdb\xd8\x0c\xf8\xd5\x41\x22\xc6\x17\x59\x25\x34\x16\xe4\xea\xf5\xd0\xbb\xf7\x97\x27\xfd\x5a\x5a\x9f\x11\x41\xb7\xa4\xe4\x90\xfc\x68\x99\xc7\x32\x60\xa0\x37\xe7\x69\x26\xb4\xa6\xfe\x1b\x17\xf9\x5d\x34\x29\xfa\x00\xf7\x6d\x9a\xdf\xbe\x2e\xe8\x89\x09\xbc\xdb\xc0\x0a\x43\x16\xff\xe2\x05\xae\xca\xcd\x06\x82\x30\xeb\x54\x70\xe2\xce\x26\x72\x9d\x41\x8e\x1b\x3b\x56\xaf\xa6\x01\x24\x1d\xaa\x6c\x3f\x3a\x60\xe6\x4f\xcb\xaa\x05\xe2\xfd\xe8\x33\x89\xb9\x4e\x7b\x66\x0b\xee\x84\xe4\x84\x62\x2e\x65\x57\x36\x73\x94\x8f\xc6\xdf\x51\xf1\x8f\x65\x40\x56\x68\xc1\xd6\x31\xd9\xf2\xb7\x8c\x64\x28\xec\x13\xf5\x3b\x15\x4a\xbd\x82\x8c\x5c\x70\xcc\x49\xf8\x65\xd5\x41\x41\xd0\x41\x32\xbc\xe7\x67\x71\xc4\xb3\x48\xbb\x75\xc1\x58\xd2\x69\xf3\x49\x8a\x1f\x50\xba\xc1\x1a\x2f\xbb\x54\x16\xb1\x0a\x40\x1b\x7d\x51\x53\x9c\x80\x7b\x1b\xda\x79\x9e\x6d\x78\xa9\xbe\xc3\x06\x51\xef\x52\x57\x99\x7f\x77\xf4\x9b\x81\x66\x2b\x13\x23\x8b\xe0\x10\x26\x89\xdb\x45\xde\xe1\xc1\xb4\x4e\xf3\x3b\x9c\xa5\x89\x47\x1f\xc9\x6c\x5f\xb6\x3e\x93\xdd\x84\xb5\xa1\xd8\xfe\x9a\x16\xb3\xf7\x72\x00\x05\xa0\x39\x5c\x07\x1d\xec\x48\x99\xa8\x1a\x5d\x5e\x36\xa2\x21\xea\xfd\xe7\xe4\x53\xb2\xff\x29\x8a\xf6\x87\xd1\xfe\xe3\xde\xc3\x88\xe5\x99\xa1\x4d\x2f\x90\xce\xcb\x79\x99\xe9\xdb\x79\x35\x4d\xab\xbc\xc1\xfb\xaa\xae\xb6\x05\x3d\x78\x72\x11\x27\x8c\xdb\xf0\x06\xfe\xb7\x3f\x5b\x27\xb9\x89\x1f\x6b\xc4\xa3\x23\x45\xf6\xb4\xd2\x39\x62\xc3\xb5\x1a\x54\xc6\x44\xe3\xc8\x55\xdb\x6b\x4b\xf8\xe9\xbf\xf7\x63\xa1\x7f\x01\x9e\x93\xcd\x0f\xa0\xc9\x5f\x07\x0c\xad\x21\xf5\x26\x9b\xcf\x67\x23\x42\xdf\x8f\xe5\xa0\xaf\x0b\x2a\xa0\xe8\x05\x6b\xa3\xb3\x33\x1b\xaa\x0a\x19\xab\xca\x7e\x4d\xf9\x34\x6c\x20\xa9\x88\x6d\x9e\x91\x29\x0a\x6c\xc2\x67\x3b\x25\xb6\x4d\xa2\xb2\x75\xd7\x8f\xa3\x37\x4e\x0f\xa8\x66\xa1\xbb\x91\xec\x44\x13\x63\xf4\x34\x48\xa2\x68\x61\xc4\xb0\x91\x2c\xae\xb2\x41\xad\xd5\xfd\x7e\xfc\x3e\x57\xfb\x72\x13\x3f\xc3\x67\x09\xe4\x79\x1c\xcf\x67\xf3\x0c\x73\x78\x3b\xb6\x83\x32\x59\x23\xb1\x68\x5f\x3d\x93\x6f\x80\x35\xa1\x78\xd5\xaf\x46\xd6\x53\xa7\x59\xad\x1f\xbc\xd4\xd6\x4f\x7e\xbb\x1a\x76\x72\xee\x21\x57\xb8\x1b\x51\x43\x36\x13\xab\xde\xef\xf0\x8c\x3c\xcf\x13\xfd\xf4\x83\x4b\x8e\x4a\xcb\x75\xd8\xb2\x36\xf3\xaa\xb9\xf9\xa1\x5c\xbb\x2f\xa4\xbf\xae\x35\xd6\x40\x13\x12\x17\x09\xf9\x78\x7e\xfa\xb2
\x98\x95\x45\x4e\x72\x4d\x4b\x07\xc0\xe1\x75\x95\x48\xf3\xd3\x3e\x64\xd2\x44\x41\x5b\x67\xc6\x16\x2b\xc9\x46\x61\x88\x02\x8e\x47\xd6\x0b\x1b\x77\x48\x93\x5c\xc1\x2a\x96\x89\xf1\x39\x1e\xa1\x94\x41\x08\xdf\x84\x50\xe5\x96\xb6\x2d\xd5\xab\x6a\x98\x6b\x33\xd5\x5f\x74\x2a\xbe\x95\x87\xfd\xcd\xcc\x79\xdb\x98\x5e\xd7\x63\x36\xab\x2d\xa3\x4d\x8d\x12\x4c\x84\x95\x92\x2a\x31\x85\xc3\xf3\x03\xc7\xf3\x98\x5a\x0d\xeb\xa5\x66\x75\x19\x29\x2b\x35\x86\x7e\x0d\x9c\x3a\xca\xd7\x35\xf9\xa4\x58\xca\xaf\xd1\x2d\x59\x32\x67\xa4\x76\x53\x48\x6f\xab\x9f\xb6\xb4\x20\x5d\x29\x14\xf6\xd1\x2d\x59\x5e\x6b\xbb\x55\x41\xb9\x12\x65\x8d\xf8\x77\xab\xb7\xb1\xe9\x2f\xa7\x84\x11\xc4\xef\x0b\x95\x42\x80\xa1\x30\x65\xaf\x48\x49\x49\x8c\x39\x91\xe7\x20\x61\x7e\xe3\x3c\x41\x94\x24\x29\x25\x31\xbf\x2c\xce\xd2\x89\xa0\x5c\xf2\xf1\xfc\x6d\x5b\x40\xc1\x94\x20\x9c\x24\x24\x51\x9e\x8e\x82\xc2\xcf\xeb\xdd\x63\x9a\xc0\x03\x70\xcc\xd3\x51\x9a\xa5\x7c\x29\x8e\x0c\x45\xa6\xd3\x8f\x4b\xe7\x77\xb4\x67\x92\xa1\xfa\x86\xde\x70\x50\x9d\x62\x36\xdd\xb0\x81\x56\xbf\xb4\xa0\x75\xac\x5c\x74\xc9\x6b\x8a\x27\x2a\x93\x87\x67\x19\xfa\x46\x91\x97\xbb\x74\x69\x56\x96\xe5\xc3\xa8\x01\x55\xaa\x3f\x3c\x6c\xcb\xb5\x95\xd0\xa2\x84\x7b\x7e\x01\x07\x7d\x07\xbe\xb0\x18\xa2\x86\x42\xd2\xf0\xe8\x59\x28\x57\xc6\x20\x15\xab\x6c\xb5\x57\x91\xc9\xcb\x08\xeb\x58\xf4\xe7\xa6\xe9\x39\x13\xfd\x99\xd9\xfa\x57\x40\xdd\x29\xe2\x6c\xb0\x85\xbb\xea\x2a\xf5\x6c\x96\x9d\x67\xf5\x8b\x36\xf6\xaa\x2a\x76\x59\x50\x9b\x97\x54\x51\x5b\x4d\xc8\xfa\x7d\x57\x6b\x62\x90\x8e\xc3\x7f\x02\xab\x11\xd9\x93\x62\xa4\x76\xe2\xb2\x1d\x6f\x32\x71\x90\x4a\x62\x81\xa6\xf2\x50\xa3\x67\xc1\x44\x8b\xa7\xc6\x2d\x67\x67\xbf\x50\x8e\x39\x93\x45\xdb\x93\x06\x5c\x89\xd6\x56\x4f\x1c\x01\x08\x5d\x80\xde\xad\xdc\x72\x7b\x3a\xa1\x71\x23\x97\x6e\xdf\x7b\xfc\xd3\xe9\xbe\xe5\xcf\x7c\x9e\x64\xf2\xa1\x48\x92\xde\x45\x35\xc8\x03\xab\xb1\xfa\x4d\xb1\xc7\xa1\xee\xd5\xae\x9e\xba\xb4\x52\xdd\x56\x57\x46\xc5\x78\x1c\xb6\xc0\x57\xd6\xb2\xf7\xc7\x75\x89\xc5\xad\x3b\x78\xa1\xc2\x65\x64\xf8\xfb\xbc\x1a\x6c\x60\x6f\x83\xd5\x28\xb9\x1e\xa4\xe3\x4b\xae\x0e\x4f\x3e\x63\xb9\x9a\x64\xd0\x49\xcb\xf3\xcb\x5f\x2d\x27\x97\xa3\x83\xe1\x3a\x3c\x1c\x0f\xfa\xda\x11\x00\xfe\x76\xe8\xe3\xb1\x07\xbc\x2d\xdf\xc0\x5b\x27\x87\xca\x66\xbe\xea\x65\x20\x7d\x27\x6e\xc6\x5d\xc3\x4e\x76\x91\xce\x4a\xfd\x3b\x0d\xee\xf9\xdf\x4a\xec\x0f\x6b\xfa\xfd\x38\x54\x29\xcd\xc5\xa9\xbf\x7b\x58\x45\x33\xbb\x50\xea\xfa\xd3\xe1\x59\x46\xb8\x7a\x4a\xfb\x77\x77\x71\x3c\xc0\xff\x6c\x4f\x60\x8a\x99\x68\xa9\xc0\x69\x7b\xca\x01\xbe\x0d\x7b\x17\x44\x85\xbd\x07\x92\x3a\xc3\x6c\x1d\x43\x67\xb2\x32\xd3\xf6\xf4\x00\x67\xa0\x6c\x3e\xa8\x73\xed\x65\x31\x87\x1d\xc2\xd3\xcd\x4e\xfd\xe1\x25\xa4\x0f\x6d\xc0\xda\x82\xdc\x45\x3f\x1c\x74\xac\xa1\xf4\xda\xf4\xfe\x26\x91\xa1\x7c\xe3\x37\x89\x9a\x43\x69\xe6\x6c\xf8\x79\x00\x2f\x8a\x83\x87\xaa\x06\xe9\x83\x34\x3f\x67\x20\x61\x5f\x10\xe7\x57\x07\x1e\xf6\xc3\x04\x76\x5c\x2e\x2c\x35\xbd\xea\xad\x55\x66\x16\xa8\x42\xe6\x5f\x87\x8a\x72\x56\xaf\xd1\xb2\x4e\x04\x9d\x5f\x99\x55\x3f\x48\xd7\x54\x45\x35\x25\x5d\xb5\x4c\x59\xd7\x00\x18\x6c\x90\x08\xef\x0e\xd4\x91\x0f\x7d\x06\x75\x02\x8e\xc7\xff\x0c\x0a\x5a\xe4\xf9\x56\xea\xa8\xd9\x7b\x29\x63\x51\xae\x46\x1d\x07\xd8\x9f\x24\x10\x58\x92\x16\x85\xd6\x4d\xdc\x22\xd8\x4c\x97\x55\x59\xe2\x1a\x1e\xda\xba\x2d\xe5\x6c\x06\xd6\x5d\x99\xdc\x04\x64\x8e\x4b\x93\xad\xdf\xdd\x99\x36\xb5\x8b\xcc\x0f\x83\x18\xa4\xbc\x1b\x15\x1c\x63\x8c\x5d\x6c\xbf\x7e\x97\x9e\x6b\x63\x12\x93\x9c\xa7\x7c\x79\x26\x7f\xd8\x02\x20\x04\x4f\x82\x3e\x0a\x9e\xe0\x59\x39\xd0\xf9\xf3\x8f\xa0\x24\xe3\xa6\xe0\x18\x0a\x26\xa6\xa0\x15\xb4\xfa\xa8\xf5\xe4\x1f\xf3\x82\x0f\xd4\x2f\xdb\x06\xad\x40\x14\x7d\xf7\xfd\x4f\xa6\xa4\x27\x4b\x16\xcf\x5e\x0f\x5a\xe6\x67\x7b\x95\xc5\xa5\x6e\xc6\x15\x7a\xd5\xe1\xfe\xea\xc9\xd1\x71\xd0\xfa\xd4\xbb\x16\x67\xfc\xea\xc7\x21\x58\xcd\x60\x33\xd3\xb8\x62\xe6\x7c\x67\x9f\x0c\xd2\x3c\xe5\xca\x1c\x91\x19\xe2\x2e\x08\x9f\x97\x2a\xda\x3b\xc6\xf1\x94\xa8\x1f\x35\xa9\x4e\xed\x4e\x26\x39\xef\x8f\x1e\x31\x8e\x79\x1a\xf7\x3e\x33\x69\xac\xca\xcf\x1b\x4e\x66\x65\x86\xb9\x0e\xde\x1d\x61\xfa\xf3\xdd\x50\xd8\xb1\x2f\x3e\x9e\xbe\x7d\x75\xf3\xcb\xc9\xf9\xc5\xe9\xfb\x77\xea\xf9\x5f\x23\x6b\x9c\xe0\xb8\xc0\x73\xcf\x92\x85\x4b\x05\x51\xc5\x14\x6a\x86\x9f\xcd\x19\x17\x98\xeb\x03\x88\xe8\x39\xb0\xed\x2d\xef\xa1\xd1\x4d\x30\xe0\x3d\x2f\xd5\x2c\x2b\x3b\x76\x90\x22\x75\xc5\xec\xb9\xde\x09\xad\x67\xec\xe0\x63\x90\x44\xaf\xc9\x67\xc5\x9c\xc7\xa1\x68\xd0\x1e\xec\xfd\x9f\x00\x00\x00\xff\xff\xa2\x82\xd6\xde\x30\x8a\x00\x00"),
 		},
 		"/static/js/prom_console.js": &vfsgen۰CompressedFileInfo{
 			name:             "prom_console.js",
@@ -142,9 +142,9 @@ var Assets = func() http.FileSystem {
 		"/static/js/targets.js": &vfsgen۰CompressedFileInfo{
 			name:             "targets.js",
 			modTime:          time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
-			uncompressedSize: 1628,
+			uncompressedSize: 2094,
 
-			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x54\xc1\x6e\xdb\x30\x0c\xbd\xfb\x2b\x08\x2d\x07\x19\x48\x8d\x62\xc7\x65\x39\x0c\xc3\x80\x6d\x97\x1d\xda\x9d\x0b\xc5\x62\x2c\xad\x8a\x14\x58\xf4\xd6\xa1\xc8\xbf\x0f\xb4\x65\xc5\x6e\xdc\x0e\xc9\x31\x22\x1f\xdf\x7b\x7c\xf4\xbe\xf3\x35\xd9\xe0\x81\x42\xd3\x38\xfc\x1e\x76\xf7\x6a\xe7\x50\xee\x3a\xa2\xe0\xd7\x10\x4d\xe8\x9c\xfe\xf2\x74\x54\x5e\x97\xcf\x05\x80\xdd\x43\x7a\xac\x1c\xfa\x86\x0c\x6c\xb7\x5b\xb8\x2d\xe1\x19\x5a\xa4\xae\xf5\x1b\x38\x15\xa9\x6e\xd6\x0c\xdc\x0d\x90\x7a\x5b\x3c\x84\xdf\xf8\xd9\xa9\x18\xa5\xa8\x83\x73\xea\x18\x51\xdf\x10\x0f\x17\x65\xa5\xb4\x4e\x6f\xd8\x77\x4f\x9e\x0c\x1d\x9c\x14\xd1\x84\x3f\xe0\x30\x46\x51\x6e\x0a\x80\x13\xa0\x8b\xf8\xc6\x88\x0b\x98\xf3\x84\xcb\xe9\x93\x11\x87\xd0\x62\x1a\xc1\xaa\x12\xf4\x51\xb5\xe8\x29\x4a\x51\xf5\x2d\x37\x75\xf0\xa4\xac\xc7\x56\x94\xd5\xde\x7a\x2d\xc5\x08\x35\xd8\x3a\x77\x62\x73\x0d\x50\x75\xa6\x87\x0e\x0f\xe8\xe9\x55\xd4\x53\x51\xe4\x75\x32\xf9\x4f\xce\xc9\x87\x35\x64\xcc\x61\x05\x2b\x79\xfe\xa3\xe2\x32\xb9\xd0\xfa\xd3\x1b\x54\x8e\xcc\xdf\x05\x80\x3a\xf8\x48\x60\xe3\xd7\xa1\x02\xb6\x73\xc8\x81\xb6\x79\xcf\x1e\x13\xb5\x52\xd4\x6c\xb3\x28\x2b\xeb\x35\x3e\xfd\xd8\x4b\xa1\x95\x6f\x58\x21\x7c\x84\xdb\x4d\x8a\x4a\x86\xe3\x20\xcd\xf0\x8c\xd5\x28\x4b\x4e\xd5\x94\xa4\xf5\x96\xe4\x28\x48\x24\x37\x49\xb5\x0d\x12\xcf\xaa\x9d\xad\x1f\x65\xae\x96\x63\xfa\x06\xee\xbd\xd7\xf7\x96\x1c\xf6\xe4\xc9\xd8\xc8\x2d\x21\x62\xa4\x44\x7d\x50\xa1\xb2\x08\xab\x39\x05\x3d\x06\xd3\x1d\x9b\x8c\x8a\xaf\xa5\x68\x1c\x09\xe0\x42\xad\xdc\x1d\x85\x56\x35\x58\x45\xa4\x6f\x84\x07\x79\xe6\xb0\x86\x9c\xce\x21\x69\xfc\x7b\x71\x8e\x69\xde\x1a\xa8\xed\x30\x15\xa5\xcc\x2f\xd3\x79\x99\xf7\x6b\xd8\x64\x29\xff\xa7\xb3\x57\x2e\x66\x3e\x7c\x24\x83\x49\x2b\x29\xaa\x5f\x61\xf7\x60\x50\x69\x6c\x81\x6d\x44\x55\x9b\xc9\x42\x1e\xd6\xe0\xac\x7f\x9c\xef\xa5\x56\xb5\x41\xdd\x8f\xb8\x23\x45\xbc\x9d\x19\xdb\x26\xb1\x5d\xc9\xbe\x77\xb2\x9a\x44\x81\xbd\xb8\x04\xd9\x6e\x67\x9a\xb2\x13\xcb\xa2\xaa\x68\x77\xce\xfa\x26\x8e\xb1\x12\x6f\xea\x7c\xc7\xe7\x72\x3f\x04\x0f\x3e\x58\x7f\xec\xf8\x40\x6b\xc3\x19\xcf\x7a\x2f\xf2\xc7\xe5\x93\xec\x2d\x66\x6c\xac\x62\xfa\xca\xb9\x9b\x9c\xee\x2c\x60\xb5\xf8\xdd\xe8\x9d\x4e\xf7\x7f\x19\x95\x29\x6a\x37\x5e\xf9\xf5\xd8\xf9\x03\xf1\xc2\x94\x53\x51\xac\x24\x1f\x67\xb9\x29\xfe\x05\x00\x00\xff\xff\x8e\x4f\x80\xff\x5c\x06\x00\x00"),
+			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x54\x3d\x6f\xdb\x30\x10\xdd\xf5\x2b\xae\x8c\x07\x0a\x70\x84\xa0\x63\x5d\x0f\x45\x51\xa0\xed\xd2\x21\xee\x1c\x50\xd4\x59\x64\x43\x93\x86\x78\x4a\x53\x24\xfe\xef\x05\x25\xea\xcb\x92\xd3\x04\x5d\x49\xde\x7b\xef\xee\xbd\xe3\xbe\xb6\x92\xb4\xb3\x40\xae\x2c\x0d\x7e\x77\xf9\x4e\xe4\x06\x79\x5e\x13\x39\xbb\x06\xaf\x5c\x6d\x8a\x2f\x8f\x47\x61\x8b\xf4\x29\x01\xd0\x7b\x88\x97\x99\x41\x5b\x92\x82\xed\x76\x0b\x37\x29\x3c\x41\x85\x54\x57\x76\x03\xa7\x24\xbe\x9b\x14\x43\xa8\x06\x88\xb5\x15\x1e\xdc\x03\x7e\x36\xc2\x7b\xce\xa4\x33\x46\x1c\x3d\x16\xd7\x14\xc8\x59\x9a\x89\xa2\x88\x77\xd8\x54\x8f\xae\x14\x1d\x0c\x67\x5e\xb9\xdf\x60\xd0\x7b\x96\x6e\x12\x80\x13\xa0\xf1\xf8\x02\xc5\x0c\x66\x60\x98\xb3\x8f\x28\x0e\xae\xc2\x48\x11\xba\x8a\xd0\x47\x51\xa1\x25\xcf\x59\xd6\x94\x5c\x4b\x67\x49\x68\x8b\x15\x4b\xb3\xbd\xb6\x05\x67\x1d\x54\x3b\xd6\xe9\x24\x36\x6f\x01\xca\x06\x79\x68\xf0\x80\x96\x2e\xa2\x9e\x92\xa4\xb7\x33\x88\xff\x64\x0c\xbf\x5b\x43\x8f\xd9\x5a\xb0\xe2\xc3\x41\x16\x9e\xf1\x85\xd2\x9f\x56\xa1\x30\xa4\xfe\x2c\x00\x48\x67\x3d\x81\xf6\x5f\xdb\x17\xb0\x9d\x42\xb6\xb2\xd5\xfb\x30\x63\xa2\x8a\x33\x19\xc6\xcc\xd2\x4c\xdb\x02\x1f\x7f\xec\x39\x2b\x84\x2d\x43\x87\xf0\x11\x6e\x36\x31\x2a\x3d\x5c\x08\xd2\x04\x4f\xe9\x02\x79\x1a\x52\x35\x16\xa9\xad\x26\xde\xea\x09\xe5\xef\x8c\x93\xc2\xdc\x92\xab\x44\x89\x99\x47\x83\x92\xb0\xd8\x89\x1c\x9e\x9f\xe1\xe2\xdd\x76\x0b\x4c\x18\x73\x4d\xa2\x2a\x91\x3c\x4b\xdb\xfc\xac\x38\xbb\x9a\x1c\x47\x9b\xf8\x38\x35\x42\x92\x7e\x88\xd1\x68\x4a\x16\x1c\x44\x21\x15\x8f\x4e\x8c\x63\x1a\x14\xbf\x28\xaa\xee\xc6\x3f\x68\x80\x41\xdb\xc2\xed\x7f\x2b\xec\x0d\x1f\xb2\xbe\xe2\x2c\xa6\x74\xa0\x91\x46\xcb\x7b\xde\xbb\xc0\x3b\x59\x6d\x26\x1a\xfc\x9d\x26\x83\x4d\x28\x48\x69\x1f\x4a\x9c\x47\x4f\x31\x12\x6d\x3a\x44\x1f\x0e\x5d\x04\x81\x0d\x46\x98\x4a\x57\xa4\x84\xbf\xb4\x9d\x1d\x25\x9c\xfb\x4a\xdf\x08\x0f\x7c\xd0\xb0\x86\x7e\xeb\xbb\x21\xc0\xf9\x37\x17\xf9\xd6\x40\x55\x8d\xf1\xd1\xc8\xa4\xb9\x9c\xf3\x7f\xe4\x2d\x6a\xfa\x56\xfe\x2d\x67\x2f\x8c\xef\xf5\x04\x43\xda\x21\x05\x13\x7f\xb9\xfc\x4e\xa1\x28\xb0\x02\xd1\x39\x38\x18\x72\xb7\x06\xa3\xed\xfd\xd4\x17\x29\xa4\x6a\xc2\x65\xf0\x96\x04\x05\x77\x26\x6a\xcb\xa8\x76\xc5\x9b\xda\x91\x35\x51\x42\x98\xc5\x1c\x24\x24\x75\xd4\x53\x3f\x89\xe5\xa6\x32\xaf\x73\xa3\x6d\xe9\xbb\x58\xb1\x17\xfb\xbc\x0a\xa9\xdc\xb5\xc1\x83\x0f\xda\x1e\xeb\xf0\xf1\x49\x15\xfe\x8e\xbe\xdf\x59\xfe\xc2\xf3\x51\xf6\x16\x33\xd6\xbd\x9a\x6d\x7f\xdf\xc0\x2b\xb7\xf9\xa2\xeb\x6c\xb4\xcf\x6c\x7d\xc6\x32\x0b\xd9\x58\xcf\xe5\xc5\x7f\xe3\x06\xbf\x5a\xdb\x02\xe3\xd4\x8e\x53\x92\xac\x78\xf8\x6e\xd3\x4d\xf2\x37\x00\x00\xff\xff\x81\x58\x5c\xf6\x2e\x08\x00\x00"),
 		},
 		"/static/vendor": &vfsgen۰DirInfo{
 			name:    "vendor",
@@ -680,9 +680,9 @@ var Assets = func() http.FileSystem {
 		"/templates/targets.html": &vfsgen۰CompressedFileInfo{
 			name:             "targets.html",
 			modTime:          time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
-			uncompressedSize: 3278,
+			uncompressedSize: 3271,
 
-			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x56\xdd\x8f\xdc\x34\x10\x7f\xdf\xbf\x62\x14\x56\x08\xa4\x66\xa3\xf6\xb1\x64\x83\x80\x56\x2a\x52\x41\xa5\xd7\xe3\x81\x97\xca\x89\x67\x37\xbe\xf3\xda\xc1\x9e\x2c\xb7\xe4\xfc\xbf\x23\xdb\xf9\xda\xaf\xa3\xc7\x03\x2f\x51\xc6\x9e\xef\xf9\xcd\x8c\xbb\x8e\xe3\x46\x28\x84\xa4\x46\xc6\x13\xe7\x16\xb9\x14\xea\x1e\xe8\xd0\xe0\x3a\x21\x7c\xa0\xac\xb2\x36\x01\x83\x72\x9d\x58\x3a\x48\xb4\x35\x22\x25\x50\x1b\xdc\xac\x93\xae\x83\x86\x51\xfd\xc1\xe0\x46\x3c\x80\x73\x99\x25\x46\xa2\xf2\x32\x19\x31\xb3\x45\xb2\xab\xca\xda\xef\xf7\xeb\xae\x83\xb2\x15\x92\xff\x8e\xc6\x0a\xad\xc0\xb9\xa4\x58\xe4\xb6\x32\xa2\x21\xb0\xa6\xba\xae\xeb\x6e\x52\x75\x77\x4d\x53\x9e\x45\x4d\xc5\xa2\xeb\x50\x71\xe7\x16\x8b\xc5\x14\x5a\xa5\x15\xa1\x22\x1f\x1d\x40\xce\xc5\x1e\x2a\xc9\xac\x5d\x87\x0b\x26\x14\x9a\x74\x23\x5b\xc1\x93\x62\x01\x00\x90\xd7\x2f\x8b\x4f\xd1\x62\x9e\xd5\x2f\xfb\x43\x2f\x26\xf8\x3a\xb1\xb5\xfe\xab\xbf\x4d\x06\x3d\x25\xa9\x74\x6b\x74\xdb\xc0\xf8\x97\x92\xde\x6e\x25\x26\xc0\x19\xb1\x9e\x58\x27\x65\x4b\xa4\x95\xed\x0d\x01\xe4\x92\x95\x28\x67\x6a\x82\x82\xc6\x88\x1d\x33\x07\x60\x15\x89\x3d\x8e\xcc\x00\xb9\x50\x4d\x4b\x7d\x71\x0c\xe3\x42\x27\xa0\xd8\xce\x57\x6a\xf0\xc8\xbb\xc8\xa4\x4c\xc7\x03\xd6\x92\xae\xf4\xae\x91\x48\xb8\x4e\xf4\x66\x93\x40\x55\x63\x75\x8f\xbc\x80\x1f\xa4\x1c\x1c\xc9\x82\x27\x5f\xe6\xd7\x4e\xa6\x2f\x9f\xef\x55\xab\x6a\x64\x92\xea\xc3\x53\xbe\x15\x70\x3b\xb0\x5d\xf1\x2c\x2b\x8d\xff\xcf\x33\x2e\xf6\xc5\x22\x9c\x76\x9d\x61\x6a\x8b\xb0\xbc\xd3\xe5\x0b\x58\x36\x5a\x4b\x78\xbd\x86\x55\x2c\xd3\x07\xad\xa5\x0d\xa5\xf7\x9c\xcb\x5e\xbb\x67\x50\xed\xee\x5d\x4f\x05\xa1\x89\x89\x34\xb1\xa0\x43\xa2\x1a\xef\x26\x1c\xf4\x79\x21\x56\x4a\x4c\x47\x10\x4d\x55\xad\x5f\x0d\x2c\x77\xba\xfc\xec\x3b\x0b\x4d\xd7\x89\x0d\x48\x82\xd1\x81\x68\xc4\x39\xe0\xde\x79\xd3\x03\x77\x9e\x57\x16\xf2\x76\xa7\xcb\xb4\xeb\x7c\x6c\xce\x0d\x7d\xf7\xd5\xd1\x61\x31\xfc\xc1\x37\x53\x80\xce\x65\x43\x20\xce\x41\xdb\x7c\x9b\x67\x6c\xa6\x3b\x22\xb1\x2f\x5a\x24\x92\x29\xae\x50\x1f\xc0\x87\x86\x29\x8e\x3c\x0d\x81\xc2\x09\x0c\x92\xc2\xb7\x02\x48\xb4\x36\xcf\xa2\x86\xa9\x48\xf5\xab\xf1\x3f\x0a\xcf\x53\x06\x31\x71\x76\xd7\xff\x94\xda\x70\x34\xc8\x87\x73\x32\xa2\x19\xa9\x5a\xef\x67\xa9\xf5\xfa\x7c\x3e\xe7\xf9\xe5\x48\x4c\x48\x3b\xe3\xf1\x5c\x66\x4e\x06\xb1\xe2\xad\xe2\x8d\x16\x8a\xf2\x8c\xea\xf3\xdb\x1b\x62\x84\x97\xaf\xde\x7b\x04\xda\x6b\x77\x96\xe0\xa6\x32\xac\xb9\x22\x1c\xef\xe0\x4d\x6b\x18\x09\xad\x2e\x33\xbd\x35\x46\x9b\xd3\xab\x3c\x9b\x47\xe1\x6f\x91\xf1\x79\x26\x4a\xcd\x0f\x13\x3d\xb6\xc1\x0c\xcb\x57\xb3\x31\x66\x10\xfb\xa4\x24\xc7\x1c\x01\x7e\xc3\x90\x5f\xdd\x7e\x7c\x0f\x8f\xb0\x95\xba\x64\xf2\xf6\xe3\xfb\x88\x39\x7f\xba\xba\xa9\x6a\xdc\xa1\x73\xaf\xb3\xac\x3f\x79\xa7\x2d\x39\xd7\x13\x1f\x18\xd5\xce\x79\xe8\xe5\xa5\x39\xb5\x30\x3a\x1c\x3a\xfc\x05\x2c\xf7\x4c\xb6\x68\x43\xef\x7a\xe1\xdf\x5a\x34\x07\x38\x8a\xe4\x44\x50\x0c\x42\x5e\xa6\x17\xbf\xc0\x0f\x90\xdb\x86\xa9\x71\x9e\x31\xbe\x45\x08\xdf\x09\xcc\x5d\x17\xdd\x70\xce\x47\x1c\x75\x39\x97\xe4\x99\x97\x3c\xf5\xdc\xbb\x10\xf7\xcc\xbf\x9f\xe6\x19\xf1\xab\xd9\xf7\x3b\x0e\xcf\x53\x3f\xf7\x96\x49\x34\x04\xe1\x9b\x76\x1d\xac\xe2\xc8\x82\x47\x88\x6d\x1a\xc9\x4f\xfa\x27\xcf\x0d\xce\x41\x50\xf9\x59\x28\x2e\x2a\x46\xda\x80\x5f\xe2\x69\xdb\x34\x68\x2a\x66\xcf\x6d\x79\xa7\x7b\x9d\x67\xe1\x5c\x0a\xfe\xc9\x70\x42\x06\xed\xd3\xf1\x54\xad\xb1\xda\xa4\x01\x74\x68\x4e\x16\x24\x69\x2d\x49\x34\x09\x90\x20\x4f\xf7\xd7\x35\xed\xe4\x9a\x4c\x8b\x91\xd4\x46\x6c\x85\x62\x32\xed\xb9\xf2\xb2\xf8\x11\x37\xda\xa0\x7f\xa7\x78\x17\x84\xda\xbe\xce\xb3\xb2\x18\x81\x72\xef\x81\x12\x80\xf5\x46\xd8\xca\x8f\x14\xe4\xb1\xab\x57\xbf\xb0\xc6\x39\x8f\xce\xae\x5b\xe2\x3e\x02\x09\x1e\xc1\x9b\xf4\x30\x5e\xde\x3b\xb7\xfe\xfa\xcf\x56\xd3\x77\x81\xc1\xb9\x81\x38\x9d\xd7\x53\x3e\x23\x92\x02\x90\xc3\x30\x8b\x96\x60\x35\x59\x04\x3f\xb9\x92\xa7\xa0\x7d\xd4\x13\xc1\xab\xa8\xf4\x7f\x83\xb7\xb4\xf8\x3c\x5b\x1c\x37\xac\x95\x94\x14\x4a\x2b\x7c\x5e\xdf\xfc\x07\xa0\x59\x4a\x6d\x98\xac\x67\x15\x08\x9b\x76\xe5\xe7\x72\x1c\xbd\xab\x9f\xed\x1f\x68\xb4\x73\xbf\xe2\x3e\xec\xd9\x10\x59\xd7\x59\xa1\x2a\x9c\x33\x3a\x07\x6c\xab\x9f\xdf\xc3\x41\x38\xe5\xfd\x80\xbf\xe0\x4f\xdd\xee\x98\x12\x7f\xe3\xb0\x03\xe6\x46\x87\xb3\xd5\x0d\x56\x5a\xf1\xd3\xfa\x3e\x69\x19\xfd\xd6\x38\x6f\xb7\x29\x01\x61\xad\x9c\x67\xfb\xda\x78\x89\x2f\x91\xd3\x09\x12\x66\xfd\x4c\xdb\xe5\xd2\x7e\x49\xda\x8e\x17\xda\xa9\x44\x9e\x1d\x2d\xb4\x3c\x0b\x0f\x80\xfe\xf9\x1d\x9f\x7b\x51\x0c\x50\xf1\xb8\x16\xfa\xe3\x41\xd3\x3f\x01\x00\x00\xff\xff\x20\xc8\x3a\x47\xce\x0c\x00\x00"),
+			compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x56\xdd\x8f\xdc\x34\x10\x7f\xdf\xbf\x62\x14\x56\x08\xa4\x66\xa3\xf6\xb1\x64\x83\x80\x56\x2a\x52\x41\xa5\xd7\xe3\x81\x97\xca\x89\x67\x37\xbe\xf3\xda\xc1\x9e\x2c\xb7\xe4\xfc\xbf\x23\xdb\xf9\xda\xaf\xa3\xc7\x03\x2f\x51\xc6\x9e\xef\xf9\xcd\x8c\xbb\x8e\xe3\x46\x28\x84\xa4\x46\xc6\x13\xe7\x16\xb9\x14\xea\x1e\xe8\xd0\xe0\x3a\x21\x7c\xa0\xac\xb2\x36\x01\x83\x72\x9d\x58\x3a\x48\xb4\x35\x22\x25\x50\x1b\xdc\xac\x93\xae\x83\x86\x51\xfd\xc1\xe0\x46\x3c\x80\x73\x99\x25\x46\xa2\xf2\x32\x19\x31\xb3\x45\xb2\xab\xca\xda\xef\xf7\xeb\xae\x83\xb2\x15\x92\xff\x8e\xc6\x0a\xad\xc0\xb9\xa4\x58\xe4\xb6\x32\xa2\x21\xb0\xa6\xba\xae\xeb\x6e\x52\x75\x77\x4d\x53\x9e\x45\x4d\xc5\xa2\xeb\x50\x71\xe7\x16\x8b\xc5\x14\x5a\xa5\x15\xa1\x22\x1f\x1d\x40\xce\xc5\x1e\x2a\xc9\xac\x5d\x87\x0b\x26\x14\x9a\x74\x23\x5b\xc1\x93\x62\x01\x00\x90\xd7\x2f\x8b\x4f\xd1\x62\x9e\xd5\x2f\xfb\x43\x2f\x26\xf8\x3a\xb1\xb5\xfe\xab\xbf\x4d\x06\x3d\x25\xa9\x74\x6b\x74\xdb\xc0\xf8\x97\x92\xde\x6e\x25\x26\xc0\x19\xb1\x9e\x58\x27\x65\x4b\xa4\x95\xed\x0d\x01\xe4\x92\x95\x28\x67\x6a\x82\x82\xc6\x88\x1d\x33\x87\x91\x0b\x20\x17\xaa\x69\xa9\xaf\x8a\x61\x5c\xe8\x04\x14\xdb\xf9\x12\x0d\xae\x78\xdf\x98\x94\xe9\x78\xc0\x5a\xd2\x95\xde\x35\x12\x09\xd7\x89\xde\x6c\x12\xa8\x6a\xac\xee\x91\x17\xf0\x83\x94\x83\x07\x59\x70\xe1\x8b\x1c\x82\x9d\x4c\x5f\x3e\xdf\xab\x56\xd5\xc8\x24\xd5\x87\xa7\x7c\x2b\xe0\x76\x60\xbb\xe2\x59\x56\x1a\xff\x9f\x67\x5c\xec\x8b\x45\x38\xed\x3a\xc3\xd4\x16\x61\x79\xa7\xcb\x17\xb0\x6c\xb4\x96\xf0\x7a\x0d\xab\x58\x9f\x0f\x5a\x4b\x1b\x6a\xee\x39\x97\xbd\x76\xcf\xa0\xda\xdd\xbb\x9e\x0a\x42\x13\x13\x69\x62\x41\x87\x44\x35\xde\x4d\x00\xe8\xf3\x42\xac\x94\x98\x8e\xe8\x99\xca\x59\xbf\x1a\x58\xee\x74\xf9\xd9\xb7\x14\x9a\xae\x13\x1b\x90\x04\xa3\x03\xd1\x88\x73\xc0\xbd\xf3\xa6\x47\xec\x3c\xaf\x2c\xe4\xed\x4e\x97\x69\xd7\xf9\xd8\x9c\x1b\x1a\xee\xab\xa3\xc3\x62\xf8\x83\x6f\xa6\x00\x9d\xcb\x86\x40\x9c\x83\xb6\xf9\x36\xcf\xd8\x4c\x77\x84\x60\x5f\xb4\x48\x24\x53\x5c\xa1\x3e\x80\x0f\x0d\x53\x1c\x79\x1a\x02\x85\x33\x5c\xfa\x1e\x00\x89\xd6\xe6\x59\xd4\x30\x15\xa9\x7e\x35\xfe\x47\xe1\x79\xca\x20\x26\xce\xee\xfa\x9f\x52\x1b\x8e\x06\xf9\x70\x4e\x46\x34\x23\x55\xeb\xfd\x2c\xb5\x5e\x9f\xcf\xe7\x3c\xbf\x1c\x89\x09\x69\x67\x3c\x9e\xcb\xcc\xc9\x20\x56\xbc\x55\xbc\xd1\x42\x51\x9e\x51\x7d\x7e\x7b\x43\x8c\xf0\xf2\xd5\x7b\x8f\x40\x7b\xed\xce\x12\xdc\x54\x86\x35\x57\x84\xe3\x1d\xbc\x69\x0d\x23\xa1\xd5\x65\xa6\xb7\xc6\x68\x73\x7a\x95\x67\xf3\x28\xfc\x2d\x32\x3e\xcf\x44\xa9\xf9\x61\xa2\xc7\x36\x98\x61\xf9\x6a\x36\xc6\x0c\x62\x9f\x94\xe4\x98\x23\xc0\x6f\x98\xee\xab\xdb\x8f\xef\xe1\x11\xb6\x52\x97\x4c\xde\x7e\x7c\x1f\x31\xe7\x4f\x57\x37\x55\x8d\x3b\x74\xee\x75\x96\xf5\x27\xef\xb4\x25\xe7\x7a\xe2\x03\xa3\xda\x39\x0f\xbd\xbc\x34\xa7\x16\x46\x87\x43\x87\xbf\x80\xe5\x9e\xc9\x16\x6d\xe8\x5d\x2f\xfc\x5b\x8b\xe6\x00\x47\x91\x9c\x08\x8a\x41\xc8\xcb\xf4\xe2\x17\xf8\x01\x72\xdb\x30\x35\xce\x33\xc6\xb7\x08\xe1\x3b\x81\xb9\xeb\xa2\x1b\xce\xf9\x88\xa3\x2e\xe7\x92\x3c\xf3\x92\xa7\x9e\x7b\x17\xe2\x82\xf9\xf7\xd3\x3c\x23\x7e\x35\xfb\x7e\xb9\xe1\x79\xea\xe7\xde\x32\x89\x86\x20\x7c\xd3\xae\x83\x55\x1c\x59\xf0\x08\xb1\x4d\x23\xf9\x49\xff\xe4\xb9\xc1\x39\x08\x2a\x3f\x0b\xc5\x45\xc5\x48\x1b\xf0\xdb\x3b\x6d\x9b\x06\x4d\xc5\xec\xb9\x2d\xef\x74\xaf\xf3\x2c\x9c\x4b\xc1\x3f\x19\x4e\xc8\xa0\x7d\x3a\x9e\xaa\x35\x56\x9b\x34\x80\x0e\xcd\xc9\x66\x24\xad\x25\x89\x26\x01\x12\xe4\xe9\xfe\xba\xa6\x9d\x5c\x93\x69\x31\x92\xda\x88\xad\x50\x4c\xa6\x3d\x57\x5e\x16\x3f\xe2\x46\x1b\xf4\x0f\x14\xef\x82\x50\xdb\xd7\x79\x56\x16\x23\x50\xee\x3d\x50\x02\xb0\xde\x08\x5b\xf9\x91\x82\x3c\x76\xf5\xea\x17\xd6\x38\xe7\xd1\xd9\x75\x4b\xdc\x47\x20\xc1\x23\x78\x93\x1e\xc6\xcb\x7b\xe7\xd6\x5f\xff\xd9\x6a\xfa\x2e\x30\x38\x37\x10\xa7\xf3\x7a\xca\x67\x44\x52\x00\x72\x18\x66\xd1\x12\xac\x26\x8b\xe0\x27\x57\xf2\x14\xb4\x8f\x7a\x22\x78\x15\x95\xfe\x6f\xf0\x96\x16\x9f\x67\x8b\xe3\x86\xb5\x92\x92\x42\x69\x85\xcf\xeb\x9b\xff\x00\x34\x4b\xa9\x0d\x93\xf5\xac\x02\x61\xd3\xae\xfc\x5c\x8e\xa3\x77\xf5\xb3\xfd\x03\x8d\x76\xee\x57\xdc\x87\x3d\x1b\x22\xeb\x3a\x2b\x54\x85\x73\x46\xe7\x80\x6d\xf5\xf3\x7b\x38\x08\xa7\xbc\x1f\xf0\x17\xfc\xa9\xdb\x1d\x53\xe2\x6f\x1c\x76\xc0\xdc\xe8\x70\xb6\xba\xc1\x4a\x2b\x7e\x5a\xdf\x27\x2d\xa3\xdf\x1a\xe7\xed\x36\x25\x20\xac\x95\xf3\x6c\x5f\x1b\x2f\xf1\x25\x72\x3a\x41\xc2\xac\x9f\x69\xbb\x5c\xda\x2f\x49\xdb\xf1\x42\x3b\x95\xc8\xb3\xa3\x85\x96\x67\xe1\x01\xd0\xbf\xbb\xe3\x73\x2f\x8a\x01\x2a\x1e\xd7\x42\x7f\x3c\x68\xfa\x27\x00\x00\xff\xff\xfc\x46\x31\xa6\xc7\x0c\x00\x00"),
 		},
 	}
 	fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{
diff --git a/web/ui/static/js/graph/index.js b/web/ui/static/js/graph/index.js
index c81f3975f3..4ce5c17747 100644
--- a/web/ui/static/js/graph/index.js
+++ b/web/ui/static/js/graph/index.js
@@ -62,6 +62,12 @@ Prometheus.Graph.prototype.initialize = function() {
   var graphWrapper = self.el.find("#graph_wrapper" + self.id);
   self.queryForm = graphWrapper.find(".query_form");
 
+  // Auto-resize the text area on input or mouse click.
+  var resizeTextarea = function(el) {
+    var offset = el.offsetHeight - el.clientHeight;
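+    // offsetHeight minus clientHeight is the border height; add it back so the
+    // textarea grows to its full scrollHeight without showing a scrollbar.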
+    $(el).css('height', 'auto').css('height', el.scrollHeight + offset);
+  };
+
   self.expr = graphWrapper.find("textarea[name=expr]");
   self.expr.keypress(function(e) {
     const enter = 13;
@@ -69,14 +75,13 @@ Prometheus.Graph.prototype.initialize = function() {
       self.queryForm.submit();
       e.preventDefault();
     }
-
-    // Auto-resize the text area on input.
-    var offset = this.offsetHeight - this.clientHeight;
-    var resizeTextarea = function(el) {
-        $(el).css('height', 'auto').css('height', el.scrollHeight + offset);
-    };
-    $(this).on('keyup input', function() { resizeTextarea(this); });
   });
+
+  // Bind the resize handler once, outside the keypress handler; binding it
+  // there would attach a duplicate listener on every key press.
+  self.expr.on('keyup input', function() { resizeTextarea(this); });
+
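+  // Also resize when the textarea is clicked, so a pre-filled expression
+  // expands as soon as it receives focus.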
+  self.expr.click(function() {
+    resizeTextarea(this);
+  });
+
   self.expr.change(self.handleChange);
 
   self.rangeInput = self.queryForm.find("input[name=range_input]");
diff --git a/web/ui/static/js/targets.js b/web/ui/static/js/targets.js
index b6cfe2660f..c92139d8be 100644
--- a/web/ui/static/js/targets.js
+++ b/web/ui/static/js/targets.js
@@ -21,6 +21,14 @@ function showUnhealthy(_, container) {
 }
 
 function init() {
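+  // Restore the tab selected on a previous visit; default to showing all targets.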
+  if (!localStorage.selectedTab || localStorage.selectedTab === "all-targets") {
+    $("#all-targets").parent().addClass("active");
+    $(".table-container").each(showAll);
+  } else if (localStorage.selectedTab === "unhealthy-targets") {
+    $("#unhealthy-targets").parent().addClass("active");
+    $(".table-container").each(showUnhealthy);
+  }
+
   $("button.targets").click(function () {
     const tableTitle = $(this).closest("h2").find("a").attr("id");
 
@@ -45,8 +53,10 @@ function init() {
 
     if (target === "all-targets") {
       $(".table-container").each(showAll);
+      localStorage.setItem("selectedTab", "all-targets");
     } else if (target === "unhealthy-targets") {
       $(".table-container").each(showUnhealthy);
+      localStorage.setItem("selectedTab", "unhealthy-targets");
     }
   });
 }
diff --git a/web/ui/templates/targets.html b/web/ui/templates/targets.html
index 2b3987ac3c..2a19004d52 100644
--- a/web/ui/templates/targets.html
+++ b/web/ui/templates/targets.html
@@ -8,7 +8,7 @@
   <div class="container-fluid">
     <h1>Targets</h1>
     <div id="showTargets" class="btn-group btn-group-toggle" data-toggle="buttons">
-      <label class="btn btn-primary active">
+      <label class="btn btn-primary">
         <input type="radio" name="targets" id="all-targets" autocomplete="off" checked> All
       </label>
       <label class="btn btn-primary ml-1">
diff --git a/web/web_test.go b/web/web_test.go
index 4c8ec63ecc..06115aaa04 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -25,6 +25,10 @@ import (
 	"testing"
 	"time"
 
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/notifier"
+	"github.com/prometheus/prometheus/rules"
+	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/storage/tsdb"
 	"github.com/prometheus/prometheus/util/testutil"
 	libtsdb "github.com/prometheus/tsdb"
@@ -101,8 +105,8 @@ func TestReadyAndHealthy(t *testing.T) {
 		Context:        nil,
 		Storage:        &tsdb.ReadyStorage{},
 		QueryEngine:    nil,
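+		// Empty but non-nil managers let pages like /targets and /rules render
+		// without panicking.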
-		ScrapeManager:  nil,
-		RuleManager:    nil,
+		ScrapeManager:  &scrape.Manager{},
+		RuleManager:    &rules.Manager{},
 		Notifier:       nil,
 		RoutePrefix:    "/",
 		EnableAdminAPI: true,
@@ -118,6 +122,10 @@ func TestReadyAndHealthy(t *testing.T) {
 	opts.Flags = map[string]string{}
 
 	webHandler := New(nil, opts)
+
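+	// Set the unexported fields directly so the /config and /status pages
+	// have data to render.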
+	webHandler.config = &config.Config{}
+	webHandler.notifier = &notifier.Manager{}
+
 	go func() {
 		err := webHandler.Run(context.Background())
 		if err != nil {
@@ -159,6 +167,46 @@ func TestReadyAndHealthy(t *testing.T) {
 	testutil.Ok(t, err)
 	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
 
+	resp, err = http.Get("http://localhost:9090/graph")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/alerts")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/flags")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/rules")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/service-discovery")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/targets")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/config")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/status")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
 	// Set to ready.
 	webHandler.Ready()
 
@@ -191,6 +239,41 @@ func TestReadyAndHealthy(t *testing.T) {
 
 	testutil.Ok(t, err)
 	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
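+	// Once the handler is ready, the same pages must answer with 200.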
+	resp, err = http.Get("http://localhost:9090/alerts")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/flags")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/rules")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/service-discovery")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/targets")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/config")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/status")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
 }
 
 func TestRoutePrefix(t *testing.T) {