
node/pkg/telemetry: prometheus remote write

Paul Noel 2 years ago
parent
commit dee0d1532b

+ 21 - 0
node/cmd/ccq/query_server.go

@@ -14,6 +14,7 @@ import (
 
 	"github.com/certusone/wormhole/node/pkg/common"
 	"github.com/certusone/wormhole/node/pkg/telemetry"
+	promremotew "github.com/certusone/wormhole/node/pkg/telemetry/prom_remote_write"
 	"github.com/certusone/wormhole/node/pkg/version"
 	ethCrypto "github.com/ethereum/go-ethereum/crypto"
 	ipfslog "github.com/ipfs/go-log/v2"
@@ -39,6 +40,7 @@ var (
 	telemetryLokiURL  *string
 	telemetryNodeName *string
 	statusAddr        *string
+	promRemoteURL     *string
 )
 
 const DEV_NETWORK_ID = "/wormhole/dev"
@@ -58,6 +60,7 @@ func init() {
 	telemetryLokiURL = QueryServerCmd.Flags().String("telemetryLokiURL", "", "Loki cloud logging URL")
 	telemetryNodeName = QueryServerCmd.Flags().String("telemetryNodeName", "", "Node name used in telemetry")
 	statusAddr = QueryServerCmd.Flags().String("statusAddr", "[::]:6060", "Listen address for status server (disabled if blank)")
+	promRemoteURL = QueryServerCmd.Flags().String("promRemoteURL", "", "Prometheus remote write URL (Grafana)")
 }
 
 var QueryServerCmd = &cobra.Command{
@@ -183,6 +186,24 @@ func runQueryServer(cmd *cobra.Command, args []string) {
 		}()
 	}
 
+	// Start the Prometheus scraper
+	usingPromRemoteWrite := *promRemoteURL != ""
+	if usingPromRemoteWrite {
+		var info promremotew.PromTelemetryInfo
+		info.PromRemoteURL = *promRemoteURL
+		info.Labels = map[string]string{
+			"node_name": *telemetryNodeName,
+			"network":   *p2pNetworkID,
+			"version":   version.Version(),
+			"product":   "ccq_server",
+		}
+
+		err := RunPrometheusScraper(ctx, logger, info)
+		if err != nil {
+			logger.Fatal("Failed to start prometheus scraper", zap.Error(err))
+		}
+	}
+
 	// Handle SIGTERM
 	sigterm := make(chan os.Signal, 1)
 	signal.Notify(sigterm, syscall.SIGTERM)

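The labels set on PromTelemetryInfo above (node_name, network, version, product) presumably end up attached to every series pushed to the remote-write endpoint. As a rough illustration only, not part of this commit, here is how such a map could be flattened onto a series using the generated prometheusv1 types added later in this diff; seriesWithLabels and its sorting step are assumptions, since the real conversion lives in the prom_remote_write package:

package main

import (
	"sort"
	"time"

	prometheusv1 "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1"
)

// seriesWithLabels is a hypothetical helper: it builds one remote-write
// TimeSeries carrying the metric name plus the telemetry labels. Remote
// write expects label names in sorted order, so the map keys are sorted
// before being appended.
func seriesWithLabels(metricName string, value float64, telemetryLabels map[string]string) *prometheusv1.TimeSeries {
	labels := []*prometheusv1.Label{{Name: "__name__", Value: metricName}}
	keys := make([]string, 0, len(telemetryLabels))
	for k := range telemetryLabels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		labels = append(labels, &prometheusv1.Label{Name: k, Value: telemetryLabels[k]})
	}
	return &prometheusv1.TimeSeries{
		Labels:  labels,
		Samples: []*prometheusv1.Sample{{Value: value, Timestamp: time.Now().UnixMilli()}},
	}
}
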
+ 24 - 0
node/cmd/ccq/status.go

@@ -1,11 +1,13 @@
 package ccq
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"time"
 
 	"github.com/certusone/wormhole/node/pkg/common"
+	promremotew "github.com/certusone/wormhole/node/pkg/telemetry/prom_remote_write"
 	"github.com/gorilla/mux"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"go.uber.org/zap"
@@ -36,3 +38,25 @@ func (s *statusServer) handleHealth(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusOK)
 	fmt.Fprintf(w, "ok")
 }
+
+func RunPrometheusScraper(ctx context.Context, logger *zap.Logger, info promremotew.PromTelemetryInfo) error {
+	promLogger := logger.With(zap.String("component", "prometheus_scraper"))
+	errC := make(chan error)
+	common.StartRunnable(ctx, errC, false, "prometheus_scraper", func(ctx context.Context) error {
+		t := time.NewTicker(15 * time.Second)
+
+		for {
+			select {
+			case <-ctx.Done():
+				return nil
+			case <-t.C:
+				err := promremotew.ScrapeAndSendLocalMetrics(ctx, info, promLogger)
+				if err != nil {
+					promLogger.Error("ScrapeAndSendLocalMetrics error", zap.Error(err))
+					return err
+				}
+			}
+		}
+	})
+	return nil
+}

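RunPrometheusScraper only drives ScrapeAndSendLocalMetrics on a 15-second ticker; the scraping and pushing itself is implemented in node/pkg/telemetry/prom_remote_write, which is not shown in this diff. Purely as a sketch of what that helper presumably does, assuming it follows the standard Prometheus remote-write conventions (gather the local registry, encode a WriteRequest, POST it snappy-compressed):

package main

import (
	"bytes"
	"context"
	"fmt"
	"net/http"

	"github.com/golang/snappy"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/protobuf/proto"

	prometheusv1 "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1"
)

// scrapeAndPush is an illustrative stand-in for ScrapeAndSendLocalMetrics,
// not the real implementation. Remote-write request bodies are
// protobuf-encoded WriteRequests compressed with snappy.
func scrapeAndPush(ctx context.Context, remoteURL string) error {
	// Gather everything registered in the default Prometheus registry.
	mfs, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		return err
	}

	// Converting each MetricFamily into TimeSeries entries is omitted here.
	_ = mfs
	req := &prometheusv1.WriteRequest{}

	raw, err := proto.Marshal(req)
	if err != nil {
		return err
	}
	body := snappy.Encode(nil, raw)

	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, remoteURL, bytes.NewReader(body))
	if err != nil {
		return err
	}
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("Content-Encoding", "snappy")
	httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")

	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("remote write returned status %d", resp.StatusCode)
	}
	return nil
}
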
+ 38 - 0
node/cmd/guardiand/node.go

@@ -39,6 +39,7 @@ import (
 	"github.com/certusone/wormhole/node/pkg/node"
 	"github.com/certusone/wormhole/node/pkg/p2p"
 	"github.com/certusone/wormhole/node/pkg/supervisor"
+	promremotew "github.com/certusone/wormhole/node/pkg/telemetry/prom_remote_write"
 	libp2p_crypto "github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/spf13/cobra"
@@ -198,6 +199,9 @@ var (
 	// Loki cloud logging parameters
 	telemetryLokiURL *string
 
+	// Prometheus remote write URL
+	promRemoteURL *string
+
 	chainGovernorEnabled *bool
 
 	ccqEnabled           *bool
@@ -364,6 +368,8 @@ func init() {
 
 	telemetryLokiURL = NodeCmd.Flags().String("telemetryLokiURL", "", "Loki cloud logging URL")
 
+	promRemoteURL = NodeCmd.Flags().String("promRemoteURL", "", "Prometheus remote write URL (Grafana)")
+
 	chainGovernorEnabled = NodeCmd.Flags().Bool("chainGovernorEnabled", false, "Run the chain governor")
 
 	ccqEnabled = NodeCmd.Flags().Bool("ccqEnabled", false, "Enable cross chain query support")
@@ -1033,6 +1039,38 @@ func runNode(cmd *cobra.Command, args []string) {
 		if err != nil {
 			logger.Fatal("failed to connect to wormchain", zap.Error(err), zap.String("component", "gwrelayer"))
 		}
+
+	}
+	usingPromRemoteWrite := *promRemoteURL != ""
+	if usingPromRemoteWrite {
+		var info promremotew.PromTelemetryInfo
+		info.PromRemoteURL = *promRemoteURL
+		info.Labels = map[string]string{
+			"node_name":     *nodeName,
+			"guardian_addr": ethcrypto.PubkeyToAddress(gk.PublicKey).String(),
+			"network":       *p2pNetworkID,
+			"version":       version.Version(),
+			"product":       "wormhole",
+		}
+
+		promLogger := logger.With(zap.String("component", "prometheus_scraper"))
+		errC := make(chan error)
+		common.StartRunnable(rootCtx, errC, false, "prometheus_scraper", func(ctx context.Context) error {
+			t := time.NewTicker(15 * time.Second)
+
+			for {
+				select {
+				case <-ctx.Done():
+					return nil
+				case <-t.C:
+					err := promremotew.ScrapeAndSendLocalMetrics(ctx, info, promLogger)
+					if err != nil {
+						promLogger.Error("ScrapeAndSendLocalMetrics error", zap.Error(err))
+						continue
+					}
+				}
+			}
+		})
 	}
 
 	var watcherConfigs = []watchers.WatcherConfig{}

+ 1 - 0
node/go.sum

@@ -2725,6 +2725,7 @@ github.com/securego/gosec/v2 v2.12.0/go.mod h1:iTpT+eKTw59bSgklBHlSnH5O2tNygHMDx
 github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY=
 github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
 github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ=
 github.com/sercand/kuberesolver/v4 v4.0.0 h1:frL7laPDG/lFm5n98ODmWnn+cvPpzlkf3LhzuPhcHP4=
 github.com/sercand/kuberesolver/v4 v4.0.0/go.mod h1:F4RGyuRmMAjeXHKL+w4P7AwUnPceEAPAhxUgXZjKgvM=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=

+ 657 - 0
node/pkg/proto/prometheus/v1/remote.pb.go

@@ -0,0 +1,657 @@
+// Copyright 2016 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTICE: THIS FILE HAS BEEN MODIFIED FROM THE ORIGINAL
+// Changes were made to use go protobuf instead of gogo protobuf.
+// Original code is here: https://github.com/prometheus/prometheus/blob/e4ec263bcc11493953c75d1b2e7bc78fd0463e05/prompb/remote.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.27.1
+// 	protoc        (unknown)
+// source: prometheus/v1/remote.proto
+
+package prometheusv1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ReadRequest_ResponseType int32
+
+const (
+	// Server will return a single ReadResponse message with matched series that includes list of raw samples.
+	// It's recommended to use streamed response types instead.
+	//
+	// Response headers:
+	// Content-Type: "application/x-protobuf"
+	// Content-Encoding: "snappy"
+	ReadRequest_RESPONSE_TYPE_SAMPLES_UNSPECIFIED ReadRequest_ResponseType = 0
+	// Server will stream a delimited ChunkedReadResponse message that
+	// contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+	// Each message is following varint size and fixed size bigendian
+	// uint32 for CRC32 Castagnoli checksum.
+	//
+	// Response headers:
+	// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
+	// Content-Encoding: ""
+	ReadRequest_RESPONSE_TYPE_STREAMED_XOR_CHUNKS ReadRequest_ResponseType = 1
+)
+
+// Enum value maps for ReadRequest_ResponseType.
+var (
+	ReadRequest_ResponseType_name = map[int32]string{
+		0: "RESPONSE_TYPE_SAMPLES_UNSPECIFIED",
+		1: "RESPONSE_TYPE_STREAMED_XOR_CHUNKS",
+	}
+	ReadRequest_ResponseType_value = map[string]int32{
+		"RESPONSE_TYPE_SAMPLES_UNSPECIFIED": 0,
+		"RESPONSE_TYPE_STREAMED_XOR_CHUNKS": 1,
+	}
+)
+
+func (x ReadRequest_ResponseType) Enum() *ReadRequest_ResponseType {
+	p := new(ReadRequest_ResponseType)
+	*p = x
+	return p
+}
+
+func (x ReadRequest_ResponseType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ReadRequest_ResponseType) Descriptor() protoreflect.EnumDescriptor {
+	return file_prometheus_v1_remote_proto_enumTypes[0].Descriptor()
+}
+
+func (ReadRequest_ResponseType) Type() protoreflect.EnumType {
+	return &file_prometheus_v1_remote_proto_enumTypes[0]
+}
+
+func (x ReadRequest_ResponseType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ReadRequest_ResponseType.Descriptor instead.
+func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) {
+	return file_prometheus_v1_remote_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type WriteRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Timeseries []*TimeSeries     `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
+	Metadata   []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"`
+}
+
+func (x *WriteRequest) Reset() {
+	*x = WriteRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_remote_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *WriteRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteRequest) ProtoMessage() {}
+
+func (x *WriteRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_remote_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteRequest.ProtoReflect.Descriptor instead.
+func (*WriteRequest) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_remote_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *WriteRequest) GetTimeseries() []*TimeSeries {
+	if x != nil {
+		return x.Timeseries
+	}
+	return nil
+}
+
+func (x *WriteRequest) GetMetadata() []*MetricMetadata {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+// ReadRequest represents a remote read request.
+type ReadRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
+	// accepted_response_types allows negotiating the content type of the response.
+	//
+	// Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
+	// implemented by server, error is returned.
+	// For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
+	AcceptedResponseTypes []ReadRequest_ResponseType `protobuf:"varint,2,rep,packed,name=accepted_response_types,json=acceptedResponseTypes,proto3,enum=prometheus.v1.ReadRequest_ResponseType" json:"accepted_response_types,omitempty"`
+}
+
+func (x *ReadRequest) Reset() {
+	*x = ReadRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_remote_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ReadRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadRequest) ProtoMessage() {}
+
+func (x *ReadRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_remote_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadRequest.ProtoReflect.Descriptor instead.
+func (*ReadRequest) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_remote_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ReadRequest) GetQueries() []*Query {
+	if x != nil {
+		return x.Queries
+	}
+	return nil
+}
+
+func (x *ReadRequest) GetAcceptedResponseTypes() []ReadRequest_ResponseType {
+	if x != nil {
+		return x.AcceptedResponseTypes
+	}
+	return nil
+}
+
+// ReadResponse is a response when response_type equals SAMPLES.
+type ReadResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// In same order as the request's queries.
+	Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *ReadResponse) Reset() {
+	*x = ReadResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_remote_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ReadResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadResponse) ProtoMessage() {}
+
+func (x *ReadResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_remote_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadResponse.ProtoReflect.Descriptor instead.
+func (*ReadResponse) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_remote_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ReadResponse) GetResults() []*QueryResult {
+	if x != nil {
+		return x.Results
+	}
+	return nil
+}
+
+type Query struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	StartTimestampMs int64           `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"`
+	EndTimestampMs   int64           `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"`
+	Matchers         []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"`
+	Hints            *ReadHints      `protobuf:"bytes,4,opt,name=hints,proto3" json:"hints,omitempty"`
+}
+
+func (x *Query) Reset() {
+	*x = Query{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_remote_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Query) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Query) ProtoMessage() {}
+
+func (x *Query) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_remote_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Query.ProtoReflect.Descriptor instead.
+func (*Query) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_remote_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Query) GetStartTimestampMs() int64 {
+	if x != nil {
+		return x.StartTimestampMs
+	}
+	return 0
+}
+
+func (x *Query) GetEndTimestampMs() int64 {
+	if x != nil {
+		return x.EndTimestampMs
+	}
+	return 0
+}
+
+func (x *Query) GetMatchers() []*LabelMatcher {
+	if x != nil {
+		return x.Matchers
+	}
+	return nil
+}
+
+func (x *Query) GetHints() *ReadHints {
+	if x != nil {
+		return x.Hints
+	}
+	return nil
+}
+
+type QueryResult struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Samples within a time series must be ordered by time.
+	Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
+}
+
+func (x *QueryResult) Reset() {
+	*x = QueryResult{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_remote_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QueryResult) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryResult) ProtoMessage() {}
+
+func (x *QueryResult) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_remote_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryResult.ProtoReflect.Descriptor instead.
+func (*QueryResult) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_remote_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *QueryResult) GetTimeseries() []*TimeSeries {
+	if x != nil {
+		return x.Timeseries
+	}
+	return nil
+}
+
+// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
+// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
+// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
+// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
+type ChunkedReadResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	ChunkedSeries []*ChunkedSeries `protobuf:"bytes,1,rep,name=chunked_series,json=chunkedSeries,proto3" json:"chunked_series,omitempty"`
+	// query_index represents an index of the query from ReadRequest.queries these chunks relates to.
+	QueryIndex int64 `protobuf:"varint,2,opt,name=query_index,json=queryIndex,proto3" json:"query_index,omitempty"`
+}
+
+func (x *ChunkedReadResponse) Reset() {
+	*x = ChunkedReadResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_remote_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ChunkedReadResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ChunkedReadResponse) ProtoMessage() {}
+
+func (x *ChunkedReadResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_remote_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ChunkedReadResponse.ProtoReflect.Descriptor instead.
+func (*ChunkedReadResponse) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_remote_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ChunkedReadResponse) GetChunkedSeries() []*ChunkedSeries {
+	if x != nil {
+		return x.ChunkedSeries
+	}
+	return nil
+}
+
+func (x *ChunkedReadResponse) GetQueryIndex() int64 {
+	if x != nil {
+		return x.QueryIndex
+	}
+	return 0
+}
+
+var File_prometheus_v1_remote_proto protoreflect.FileDescriptor
+
+var file_prometheus_v1_remote_proto_rawDesc = []byte{
+	0x0a, 0x1a, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x76, 0x31, 0x2f,
+	0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x70, 0x72, 0x6f,
+	0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a, 0x01, 0x0a, 0x0c, 0x57, 0x72, 0x69, 0x74, 0x65,
+	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73,
+	0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+	0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x65, 0x72, 0x69,
+	0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+	0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64,
+	0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4a, 0x04, 0x08,
+	0x02, 0x10, 0x03, 0x22, 0xfc, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75,
+	0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+	0x73, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72,
+	0x69, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x17, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f,
+	0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02,
+	0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+	0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x15, 0x61,
+	0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54,
+	0x79, 0x70, 0x65, 0x73, 0x22, 0x5c, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+	0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x21, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45,
+	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x53, 0x5f, 0x55, 0x4e,
+	0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x25, 0x0a, 0x21, 0x52,
+	0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52,
+	0x45, 0x41, 0x4d, 0x45, 0x44, 0x5f, 0x58, 0x4f, 0x52, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x53,
+	0x10, 0x01, 0x22, 0x44, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+	0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52,
+	0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x05, 0x51, 0x75, 0x65,
+	0x72, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10,
+	0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73,
+	0x12, 0x28, 0x0a, 0x10, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+	0x70, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x54,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x61,
+	0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70,
+	0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62,
+	0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68,
+	0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x05, 0x68, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e,
+	0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x48, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x05, 0x68, 0x69,
+	0x6e, 0x74, 0x73, 0x22, 0x48, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75,
+	0x6c, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73,
+	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65,
+	0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0x7b, 0x0a,
+	0x13, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70,
+	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0e, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f,
+	0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70,
+	0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x75,
+	0x6e, 0x6b, 0x65, 0x64, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0d, 0x63, 0x68, 0x75, 0x6e,
+	0x6b, 0x65, 0x64, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x71, 0x75, 0x65,
+	0x72, 0x79, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a,
+	0x71, 0x75, 0x65, 0x72, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x49, 0x5a, 0x47, 0x67, 0x69,
+	0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x75, 0x73, 0x6f,
+	0x6e, 0x65, 0x2f, 0x77, 0x6f, 0x72, 0x6d, 0x68, 0x6f, 0x6c, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65,
+	0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+	0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_prometheus_v1_remote_proto_rawDescOnce sync.Once
+	file_prometheus_v1_remote_proto_rawDescData = file_prometheus_v1_remote_proto_rawDesc
+)
+
+func file_prometheus_v1_remote_proto_rawDescGZIP() []byte {
+	file_prometheus_v1_remote_proto_rawDescOnce.Do(func() {
+		file_prometheus_v1_remote_proto_rawDescData = protoimpl.X.CompressGZIP(file_prometheus_v1_remote_proto_rawDescData)
+	})
+	return file_prometheus_v1_remote_proto_rawDescData
+}
+
+var file_prometheus_v1_remote_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_prometheus_v1_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_prometheus_v1_remote_proto_goTypes = []interface{}{
+	(ReadRequest_ResponseType)(0), // 0: prometheus.v1.ReadRequest.ResponseType
+	(*WriteRequest)(nil),          // 1: prometheus.v1.WriteRequest
+	(*ReadRequest)(nil),           // 2: prometheus.v1.ReadRequest
+	(*ReadResponse)(nil),          // 3: prometheus.v1.ReadResponse
+	(*Query)(nil),                 // 4: prometheus.v1.Query
+	(*QueryResult)(nil),           // 5: prometheus.v1.QueryResult
+	(*ChunkedReadResponse)(nil),   // 6: prometheus.v1.ChunkedReadResponse
+	(*TimeSeries)(nil),            // 7: prometheus.v1.TimeSeries
+	(*MetricMetadata)(nil),        // 8: prometheus.v1.MetricMetadata
+	(*LabelMatcher)(nil),          // 9: prometheus.v1.LabelMatcher
+	(*ReadHints)(nil),             // 10: prometheus.v1.ReadHints
+	(*ChunkedSeries)(nil),         // 11: prometheus.v1.ChunkedSeries
+}
+var file_prometheus_v1_remote_proto_depIdxs = []int32{
+	7,  // 0: prometheus.v1.WriteRequest.timeseries:type_name -> prometheus.v1.TimeSeries
+	8,  // 1: prometheus.v1.WriteRequest.metadata:type_name -> prometheus.v1.MetricMetadata
+	4,  // 2: prometheus.v1.ReadRequest.queries:type_name -> prometheus.v1.Query
+	0,  // 3: prometheus.v1.ReadRequest.accepted_response_types:type_name -> prometheus.v1.ReadRequest.ResponseType
+	5,  // 4: prometheus.v1.ReadResponse.results:type_name -> prometheus.v1.QueryResult
+	9,  // 5: prometheus.v1.Query.matchers:type_name -> prometheus.v1.LabelMatcher
+	10, // 6: prometheus.v1.Query.hints:type_name -> prometheus.v1.ReadHints
+	7,  // 7: prometheus.v1.QueryResult.timeseries:type_name -> prometheus.v1.TimeSeries
+	11, // 8: prometheus.v1.ChunkedReadResponse.chunked_series:type_name -> prometheus.v1.ChunkedSeries
+	9,  // [9:9] is the sub-list for method output_type
+	9,  // [9:9] is the sub-list for method input_type
+	9,  // [9:9] is the sub-list for extension type_name
+	9,  // [9:9] is the sub-list for extension extendee
+	0,  // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_prometheus_v1_remote_proto_init() }
+func file_prometheus_v1_remote_proto_init() {
+	if File_prometheus_v1_remote_proto != nil {
+		return
+	}
+	file_prometheus_v1_types_proto_init()
+	if !protoimpl.UnsafeEnabled {
+		file_prometheus_v1_remote_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*WriteRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_remote_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ReadRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_remote_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ReadResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_remote_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Query); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_remote_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*QueryResult); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_remote_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ChunkedReadResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_prometheus_v1_remote_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   6,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_prometheus_v1_remote_proto_goTypes,
+		DependencyIndexes: file_prometheus_v1_remote_proto_depIdxs,
+		EnumInfos:         file_prometheus_v1_remote_proto_enumTypes,
+		MessageInfos:      file_prometheus_v1_remote_proto_msgTypes,
+	}.Build()
+	File_prometheus_v1_remote_proto = out.File
+	file_prometheus_v1_remote_proto_rawDesc = nil
+	file_prometheus_v1_remote_proto_goTypes = nil
+	file_prometheus_v1_remote_proto_depIdxs = nil
+}

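The NOTICE at the top of this file explains why these definitions are vendored at all: the upstream prompb code is generated with gogo protobuf, while this copy is regenerated for the standard Go protobuf runtime, so the messages can be marshaled with google.golang.org/protobuf/proto like every other generated message in this repo. A minimal, hypothetical round-trip to illustrate (the example values are made up):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	prometheusv1 "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1"
)

func main() {
	in := &prometheusv1.WriteRequest{
		Timeseries: []*prometheusv1.TimeSeries{{
			Labels:  []*prometheusv1.Label{{Name: "__name__", Value: "up"}},
			Samples: []*prometheusv1.Sample{{Value: 1, Timestamp: 1700000000000}},
		}},
	}

	// Works with the standard protobuf runtime because this is
	// protoc-gen-go output, not gogo-generated code.
	raw, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	out := &prometheusv1.WriteRequest{}
	if err := proto.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(len(out.GetTimeseries()), "series round-tripped")
}
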
+ 1615 - 0
node/pkg/proto/prometheus/v1/types.pb.go

@@ -0,0 +1,1615 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTICE: THIS FILE HAS BEEN MODIFIED FROM THE ORIGINAL
+// Changes were made to use go protobuf instead of gogo protobuf.
+// Original code is here: https://github.com/prometheus/prometheus/blob/e4ec263bcc11493953c75d1b2e7bc78fd0463e05/prompb/types.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.27.1
+// 	protoc        (unknown)
+// source: prometheus/v1/types.proto
+
+package prometheusv1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MetricMetadata_MetricType int32
+
+const (
+	MetricMetadata_METRIC_TYPE_UNKNOWN_UNSPECIFIED MetricMetadata_MetricType = 0
+	MetricMetadata_METRIC_TYPE_COUNTER             MetricMetadata_MetricType = 1
+	MetricMetadata_METRIC_TYPE_GAUGE               MetricMetadata_MetricType = 2
+	MetricMetadata_METRIC_TYPE_HISTOGRAM           MetricMetadata_MetricType = 3
+	MetricMetadata_METRIC_TYPE_GAUGEHISTOGRAM      MetricMetadata_MetricType = 4
+	MetricMetadata_METRIC_TYPE_SUMMARY             MetricMetadata_MetricType = 5
+	MetricMetadata_METRIC_TYPE_INFO                MetricMetadata_MetricType = 6
+	MetricMetadata_METRIC_TYPE_STATESET            MetricMetadata_MetricType = 7
+)
+
+// Enum value maps for MetricMetadata_MetricType.
+var (
+	MetricMetadata_MetricType_name = map[int32]string{
+		0: "METRIC_TYPE_UNKNOWN_UNSPECIFIED",
+		1: "METRIC_TYPE_COUNTER",
+		2: "METRIC_TYPE_GAUGE",
+		3: "METRIC_TYPE_HISTOGRAM",
+		4: "METRIC_TYPE_GAUGEHISTOGRAM",
+		5: "METRIC_TYPE_SUMMARY",
+		6: "METRIC_TYPE_INFO",
+		7: "METRIC_TYPE_STATESET",
+	}
+	MetricMetadata_MetricType_value = map[string]int32{
+		"METRIC_TYPE_UNKNOWN_UNSPECIFIED": 0,
+		"METRIC_TYPE_COUNTER":             1,
+		"METRIC_TYPE_GAUGE":               2,
+		"METRIC_TYPE_HISTOGRAM":           3,
+		"METRIC_TYPE_GAUGEHISTOGRAM":      4,
+		"METRIC_TYPE_SUMMARY":             5,
+		"METRIC_TYPE_INFO":                6,
+		"METRIC_TYPE_STATESET":            7,
+	}
+)
+
+func (x MetricMetadata_MetricType) Enum() *MetricMetadata_MetricType {
+	p := new(MetricMetadata_MetricType)
+	*p = x
+	return p
+}
+
+func (x MetricMetadata_MetricType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricMetadata_MetricType) Descriptor() protoreflect.EnumDescriptor {
+	return file_prometheus_v1_types_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricMetadata_MetricType) Type() protoreflect.EnumType {
+	return &file_prometheus_v1_types_proto_enumTypes[0]
+}
+
+func (x MetricMetadata_MetricType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use MetricMetadata_MetricType.Descriptor instead.
+func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type Histogram_ResetHint int32
+
+const (
+	Histogram_RESET_HINT_UNKNOWN_UNSPECIFIED Histogram_ResetHint = 0 // Need to test for a counter reset explicitly.
+	Histogram_RESET_HINT_YES                 Histogram_ResetHint = 1 // This is the 1st histogram after a counter reset.
+	Histogram_RESET_HINT_NO                  Histogram_ResetHint = 2 // There was no counter reset between this and the previous Histogram.
+	Histogram_RESET_HINT_GAUGE               Histogram_ResetHint = 3 // This is a gauge histogram where counter resets don't happen.
+)
+
+// Enum value maps for Histogram_ResetHint.
+var (
+	Histogram_ResetHint_name = map[int32]string{
+		0: "RESET_HINT_UNKNOWN_UNSPECIFIED",
+		1: "RESET_HINT_YES",
+		2: "RESET_HINT_NO",
+		3: "RESET_HINT_GAUGE",
+	}
+	Histogram_ResetHint_value = map[string]int32{
+		"RESET_HINT_UNKNOWN_UNSPECIFIED": 0,
+		"RESET_HINT_YES":                 1,
+		"RESET_HINT_NO":                  2,
+		"RESET_HINT_GAUGE":               3,
+	}
+)
+
+func (x Histogram_ResetHint) Enum() *Histogram_ResetHint {
+	p := new(Histogram_ResetHint)
+	*p = x
+	return p
+}
+
+func (x Histogram_ResetHint) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Histogram_ResetHint) Descriptor() protoreflect.EnumDescriptor {
+	return file_prometheus_v1_types_proto_enumTypes[1].Descriptor()
+}
+
+func (Histogram_ResetHint) Type() protoreflect.EnumType {
+	return &file_prometheus_v1_types_proto_enumTypes[1]
+}
+
+func (x Histogram_ResetHint) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Histogram_ResetHint.Descriptor instead.
+func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{3, 0}
+}
+
+type LabelMatcher_Type int32
+
+const (
+	LabelMatcher_TYPE_EQ_UNSPECIFIED LabelMatcher_Type = 0
+	LabelMatcher_TYPE_NEQ            LabelMatcher_Type = 1
+	LabelMatcher_TYPE_RE             LabelMatcher_Type = 2
+	LabelMatcher_TYPE_NRE            LabelMatcher_Type = 3
+)
+
+// Enum value maps for LabelMatcher_Type.
+var (
+	LabelMatcher_Type_name = map[int32]string{
+		0: "TYPE_EQ_UNSPECIFIED",
+		1: "TYPE_NEQ",
+		2: "TYPE_RE",
+		3: "TYPE_NRE",
+	}
+	LabelMatcher_Type_value = map[string]int32{
+		"TYPE_EQ_UNSPECIFIED": 0,
+		"TYPE_NEQ":            1,
+		"TYPE_RE":             2,
+		"TYPE_NRE":            3,
+	}
+)
+
+func (x LabelMatcher_Type) Enum() *LabelMatcher_Type {
+	p := new(LabelMatcher_Type)
+	*p = x
+	return p
+}
+
+func (x LabelMatcher_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (LabelMatcher_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_prometheus_v1_types_proto_enumTypes[2].Descriptor()
+}
+
+func (LabelMatcher_Type) Type() protoreflect.EnumType {
+	return &file_prometheus_v1_types_proto_enumTypes[2]
+}
+
+func (x LabelMatcher_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use LabelMatcher_Type.Descriptor instead.
+func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{8, 0}
+}
+
+// We require this to match chunkenc.Encoding.
+type Chunk_Encoding int32
+
+const (
+	Chunk_ENCODING_UNKNOWN_UNSPECIFIED Chunk_Encoding = 0
+	Chunk_ENCODING_XOR                 Chunk_Encoding = 1
+	Chunk_ENCODING_HISTOGRAM           Chunk_Encoding = 2
+	Chunk_ENCODING_FLOAT_HISTOGRAM     Chunk_Encoding = 3
+)
+
+// Enum value maps for Chunk_Encoding.
+var (
+	Chunk_Encoding_name = map[int32]string{
+		0: "ENCODING_UNKNOWN_UNSPECIFIED",
+		1: "ENCODING_XOR",
+		2: "ENCODING_HISTOGRAM",
+		3: "ENCODING_FLOAT_HISTOGRAM",
+	}
+	Chunk_Encoding_value = map[string]int32{
+		"ENCODING_UNKNOWN_UNSPECIFIED": 0,
+		"ENCODING_XOR":                 1,
+		"ENCODING_HISTOGRAM":           2,
+		"ENCODING_FLOAT_HISTOGRAM":     3,
+	}
+)
+
+func (x Chunk_Encoding) Enum() *Chunk_Encoding {
+	p := new(Chunk_Encoding)
+	*p = x
+	return p
+}
+
+func (x Chunk_Encoding) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Chunk_Encoding) Descriptor() protoreflect.EnumDescriptor {
+	return file_prometheus_v1_types_proto_enumTypes[3].Descriptor()
+}
+
+func (Chunk_Encoding) Type() protoreflect.EnumType {
+	return &file_prometheus_v1_types_proto_enumTypes[3]
+}
+
+func (x Chunk_Encoding) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Chunk_Encoding.Descriptor instead.
+func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{10, 0}
+}
+
+type MetricMetadata struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Represents the metric type, these match the set from Prometheus.
+	// Refer to model/textparse/interface.go for details.
+	Type             MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.v1.MetricMetadata_MetricType" json:"type,omitempty"`
+	MetricFamilyName string                    `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"`
+	Help             string                    `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"`
+	Unit             string                    `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
+}
+
+func (x *MetricMetadata) Reset() {
+	*x = MetricMetadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MetricMetadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricMetadata) ProtoMessage() {}
+
+func (x *MetricMetadata) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricMetadata.ProtoReflect.Descriptor instead.
+func (*MetricMetadata) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MetricMetadata) GetType() MetricMetadata_MetricType {
+	if x != nil {
+		return x.Type
+	}
+	return MetricMetadata_METRIC_TYPE_UNKNOWN_UNSPECIFIED
+}
+
+func (x *MetricMetadata) GetMetricFamilyName() string {
+	if x != nil {
+		return x.MetricFamilyName
+	}
+	return ""
+}
+
+func (x *MetricMetadata) GetHelp() string {
+	if x != nil {
+		return x.Help
+	}
+	return ""
+}
+
+func (x *MetricMetadata) GetUnit() string {
+	if x != nil {
+		return x.Unit
+	}
+	return ""
+}
+
+type Sample struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	// timestamp is in ms format, see model/timestamp/timestamp.go for
+	// conversion from time.Time to Prometheus timestamp.
+	Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (x *Sample) Reset() {
+	*x = Sample{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Sample) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Sample) ProtoMessage() {}
+
+func (x *Sample) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Sample.ProtoReflect.Descriptor instead.
+func (*Sample) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Sample) GetValue() float64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+func (x *Sample) GetTimestamp() int64 {
+	if x != nil {
+		return x.Timestamp
+	}
+	return 0
+}
+
+type Exemplar struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Optional, can be empty.
+	Labels []*Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"`
+	Value  float64  `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+	// timestamp is in ms format, see model/timestamp/timestamp.go for
+	// conversion from time.Time to Prometheus timestamp.
+	Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (x *Exemplar) Reset() {
+	*x = Exemplar{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Exemplar) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Exemplar) GetLabels() []*Label {
+	if x != nil {
+		return x.Labels
+	}
+	return nil
+}
+
+func (x *Exemplar) GetValue() float64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+func (x *Exemplar) GetTimestamp() int64 {
+	if x != nil {
+		return x.Timestamp
+	}
+	return 0
+}
+
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both, the usual
+// integer histogram as well as a float histogram.
+type Histogram struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Types that are assignable to Count:
+	//
+	//	*Histogram_CountInt
+	//	*Histogram_CountFloat
+	Count isHistogram_Count `protobuf_oneof:"count"`
+	Sum   float64           `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` // Sum of observations in the histogram.
+	// The schema defines the bucket schema. Currently, valid numbers
+	// are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+	// is a bucket boundary in each case, and then each power of two is
+	// divided into 2^n logarithmic buckets. Or in other words, each
+	// bucket boundary is the previous boundary times 2^(2^-n). In the
+	// future, more bucket schemas may be added using numbers < -4 or >
+	// 8.
+	Schema        int32   `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"`
+	ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
+	// Types that are assignable to ZeroCount:
+	//
+	//	*Histogram_ZeroCountInt
+	//	*Histogram_ZeroCountFloat
+	ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
+	// Negative Buckets.
+	NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"`
+	// Use either "negative_deltas" or "negative_counts", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	NegativeDeltas []int64   `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+	NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` // Absolute count of each bucket.
+	// Positive Buckets.
+	PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"`
+	// Use either "positive_deltas" or "positive_counts", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	PositiveDeltas []int64             `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+	PositiveCounts []float64           `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`  // Absolute count of each bucket.
+	ResetHint      Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.v1.Histogram_ResetHint" json:"reset_hint,omitempty"`
+	// timestamp is in ms format, see model/timestamp/timestamp.go for
+	// conversion from time.Time to Prometheus timestamp.
+	Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (x *Histogram) Reset() {
+	*x = Histogram{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Histogram) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{3}
+}
+
+func (m *Histogram) GetCount() isHistogram_Count {
+	if m != nil {
+		return m.Count
+	}
+	return nil
+}
+
+func (x *Histogram) GetCountInt() uint64 {
+	if x, ok := x.GetCount().(*Histogram_CountInt); ok {
+		return x.CountInt
+	}
+	return 0
+}
+
+func (x *Histogram) GetCountFloat() float64 {
+	if x, ok := x.GetCount().(*Histogram_CountFloat); ok {
+		return x.CountFloat
+	}
+	return 0
+}
+
+func (x *Histogram) GetSum() float64 {
+	if x != nil {
+		return x.Sum
+	}
+	return 0
+}
+
+func (x *Histogram) GetSchema() int32 {
+	if x != nil {
+		return x.Schema
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroThreshold() float64 {
+	if x != nil {
+		return x.ZeroThreshold
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
+	if m != nil {
+		return m.ZeroCount
+	}
+	return nil
+}
+
+func (x *Histogram) GetZeroCountInt() uint64 {
+	if x, ok := x.GetZeroCount().(*Histogram_ZeroCountInt); ok {
+		return x.ZeroCountInt
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroCountFloat() float64 {
+	if x, ok := x.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
+		return x.ZeroCountFloat
+	}
+	return 0
+}
+
+func (x *Histogram) GetNegativeSpans() []*BucketSpan {
+	if x != nil {
+		return x.NegativeSpans
+	}
+	return nil
+}
+
+func (x *Histogram) GetNegativeDeltas() []int64 {
+	if x != nil {
+		return x.NegativeDeltas
+	}
+	return nil
+}
+
+func (x *Histogram) GetNegativeCounts() []float64 {
+	if x != nil {
+		return x.NegativeCounts
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveSpans() []*BucketSpan {
+	if x != nil {
+		return x.PositiveSpans
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveDeltas() []int64 {
+	if x != nil {
+		return x.PositiveDeltas
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveCounts() []float64 {
+	if x != nil {
+		return x.PositiveCounts
+	}
+	return nil
+}
+
+func (x *Histogram) GetResetHint() Histogram_ResetHint {
+	if x != nil {
+		return x.ResetHint
+	}
+	return Histogram_RESET_HINT_UNKNOWN_UNSPECIFIED
+}
+
+func (x *Histogram) GetTimestamp() int64 {
+	if x != nil {
+		return x.Timestamp
+	}
+	return 0
+}
+
+type isHistogram_Count interface {
+	isHistogram_Count()
+}
+
+type Histogram_CountInt struct {
+	CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof"`
+}
+
+type Histogram_CountFloat struct {
+	CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof"`
+}
+
+func (*Histogram_CountInt) isHistogram_Count() {}
+
+func (*Histogram_CountFloat) isHistogram_Count() {}
+
+type isHistogram_ZeroCount interface {
+	isHistogram_ZeroCount()
+}
+
+type Histogram_ZeroCountInt struct {
+	ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof"`
+}
+
+type Histogram_ZeroCountFloat struct {
+	ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof"`
+}
+
+func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {}
+
+func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+type BucketSpan struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Offset int32  `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+	Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`   // Length of consecutive buckets.
+}
+
+func (x *BucketSpan) Reset() {
+	*x = BucketSpan{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BucketSpan) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *BucketSpan) GetOffset() int32 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *BucketSpan) GetLength() uint32 {
+	if x != nil {
+		return x.Length
+	}
+	return 0
+}
+
+// TimeSeries represents samples and labels for a single time series.
+type TimeSeries struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// For a timeseries to be valid, and for the samples and exemplars
+	// to be ingested by the remote system properly, the labels field is required.
+	Labels     []*Label     `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"`
+	Samples    []*Sample    `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples,omitempty"`
+	Exemplars  []*Exemplar  `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	Histograms []*Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms,omitempty"`
+}
+
+func (x *TimeSeries) Reset() {
+	*x = TimeSeries{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TimeSeries) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeries) ProtoMessage() {}
+
+func (x *TimeSeries) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead.
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *TimeSeries) GetLabels() []*Label {
+	if x != nil {
+		return x.Labels
+	}
+	return nil
+}
+
+func (x *TimeSeries) GetSamples() []*Sample {
+	if x != nil {
+		return x.Samples
+	}
+	return nil
+}
+
+func (x *TimeSeries) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
+func (x *TimeSeries) GetHistograms() []*Histogram {
+	if x != nil {
+		return x.Histograms
+	}
+	return nil
+}
+
+type Label struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name  string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Label) Reset() {
+	*x = Label{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Label) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Label) ProtoMessage() {}
+
+func (x *Label) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Label.ProtoReflect.Descriptor instead.
+func (*Label) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Label) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Label) GetValue() string {
+	if x != nil {
+		return x.Value
+	}
+	return ""
+}
+
+type Labels struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Labels []*Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"`
+}
+
+func (x *Labels) Reset() {
+	*x = Labels{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Labels) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Labels) ProtoMessage() {}
+
+func (x *Labels) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Labels.ProtoReflect.Descriptor instead.
+func (*Labels) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Labels) GetLabels() []*Label {
+	if x != nil {
+		return x.Labels
+	}
+	return nil
+}
+
+// Matcher specifies a rule, which can match a set of labels or not.
+type LabelMatcher struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type  LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.v1.LabelMatcher_Type" json:"type,omitempty"`
+	Name  string            `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	Value string            `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *LabelMatcher) Reset() {
+	*x = LabelMatcher{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LabelMatcher) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelMatcher) ProtoMessage() {}
+
+func (x *LabelMatcher) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelMatcher.ProtoReflect.Descriptor instead.
+func (*LabelMatcher) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *LabelMatcher) GetType() LabelMatcher_Type {
+	if x != nil {
+		return x.Type
+	}
+	return LabelMatcher_TYPE_EQ_UNSPECIFIED
+}
+
+func (x *LabelMatcher) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *LabelMatcher) GetValue() string {
+	if x != nil {
+		return x.Value
+	}
+	return ""
+}
+
+type ReadHints struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	StepMs   int64    `protobuf:"varint,1,opt,name=step_ms,json=stepMs,proto3" json:"step_ms,omitempty"`    // Query step size in milliseconds.
+	Func     string   `protobuf:"bytes,2,opt,name=func,proto3" json:"func,omitempty"`                       // String representation of surrounding function or aggregation.
+	StartMs  int64    `protobuf:"varint,3,opt,name=start_ms,json=startMs,proto3" json:"start_ms,omitempty"` // Start time in milliseconds.
+	EndMs    int64    `protobuf:"varint,4,opt,name=end_ms,json=endMs,proto3" json:"end_ms,omitempty"`       // End time in milliseconds.
+	Grouping []string `protobuf:"bytes,5,rep,name=grouping,proto3" json:"grouping,omitempty"`               // List of label names used in aggregation.
+	By       bool     `protobuf:"varint,6,opt,name=by,proto3" json:"by,omitempty"`                          // Indicate whether it is without or by.
+	RangeMs  int64    `protobuf:"varint,7,opt,name=range_ms,json=rangeMs,proto3" json:"range_ms,omitempty"` // Range vector selector range in milliseconds.
+}
+
+func (x *ReadHints) Reset() {
+	*x = ReadHints{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ReadHints) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadHints) ProtoMessage() {}
+
+func (x *ReadHints) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadHints.ProtoReflect.Descriptor instead.
+func (*ReadHints) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ReadHints) GetStepMs() int64 {
+	if x != nil {
+		return x.StepMs
+	}
+	return 0
+}
+
+func (x *ReadHints) GetFunc() string {
+	if x != nil {
+		return x.Func
+	}
+	return ""
+}
+
+func (x *ReadHints) GetStartMs() int64 {
+	if x != nil {
+		return x.StartMs
+	}
+	return 0
+}
+
+func (x *ReadHints) GetEndMs() int64 {
+	if x != nil {
+		return x.EndMs
+	}
+	return 0
+}
+
+func (x *ReadHints) GetGrouping() []string {
+	if x != nil {
+		return x.Grouping
+	}
+	return nil
+}
+
+func (x *ReadHints) GetBy() bool {
+	if x != nil {
+		return x.By
+	}
+	return false
+}
+
+func (x *ReadHints) GetRangeMs() int64 {
+	if x != nil {
+		return x.RangeMs
+	}
+	return 0
+}
+
+// Chunk represents a TSDB chunk.
+// Time range [min, max] is inclusive.
+type Chunk struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	MinTimeMs int64          `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"`
+	MaxTimeMs int64          `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"`
+	Type      Chunk_Encoding `protobuf:"varint,3,opt,name=type,proto3,enum=prometheus.v1.Chunk_Encoding" json:"type,omitempty"`
+	Data      []byte         `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *Chunk) Reset() {
+	*x = Chunk{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Chunk) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Chunk) ProtoMessage() {}
+
+func (x *Chunk) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Chunk.ProtoReflect.Descriptor instead.
+func (*Chunk) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *Chunk) GetMinTimeMs() int64 {
+	if x != nil {
+		return x.MinTimeMs
+	}
+	return 0
+}
+
+func (x *Chunk) GetMaxTimeMs() int64 {
+	if x != nil {
+		return x.MaxTimeMs
+	}
+	return 0
+}
+
+func (x *Chunk) GetType() Chunk_Encoding {
+	if x != nil {
+		return x.Type
+	}
+	return Chunk_ENCODING_UNKNOWN_UNSPECIFIED
+}
+
+func (x *Chunk) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+// ChunkedSeries represents a single, encoded time series.
+type ChunkedSeries struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Labels should be sorted.
+	Labels []*Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"`
+	// Chunks will be in start time order and may overlap.
+	Chunks []*Chunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks,omitempty"`
+}
+
+func (x *ChunkedSeries) Reset() {
+	*x = ChunkedSeries{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_prometheus_v1_types_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ChunkedSeries) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ChunkedSeries) ProtoMessage() {}
+
+func (x *ChunkedSeries) ProtoReflect() protoreflect.Message {
+	mi := &file_prometheus_v1_types_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ChunkedSeries.ProtoReflect.Descriptor instead.
+func (*ChunkedSeries) Descriptor() ([]byte, []int) {
+	return file_prometheus_v1_types_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *ChunkedSeries) GetLabels() []*Label {
+	if x != nil {
+		return x.Labels
+	}
+	return nil
+}
+
+func (x *ChunkedSeries) GetChunks() []*Chunk {
+	if x != nil {
+		return x.Chunks
+	}
+	return nil
+}
+
+var File_prometheus_v1_types_proto protoreflect.FileDescriptor
+
+var file_prometheus_v1_types_proto_rawDesc = []byte{
+	0x0a, 0x19, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x76, 0x31, 0x2f,
+	0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x70, 0x72, 0x6f,
+	0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x22, 0x8c, 0x03, 0x0a, 0x0e, 0x4d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72,
+	0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+	0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+	0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x12, 0x0a,
+	0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69,
+	0x74, 0x22, 0xe5, 0x01, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65,
+	0x12, 0x23, 0x0a, 0x1f, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+	0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f,
+	0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x15,
+	0x0a, 0x11, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x41,
+	0x55, 0x47, 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f,
+	0x54, 0x59, 0x50, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x03,
+	0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x47, 0x41, 0x55, 0x47, 0x45, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04,
+	0x12, 0x17, 0x0a, 0x13, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x45, 0x54,
+	0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x06, 0x12,
+	0x18, 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
+	0x54, 0x41, 0x54, 0x45, 0x53, 0x45, 0x54, 0x10, 0x07, 0x22, 0x3c, 0x0a, 0x06, 0x53, 0x61, 0x6d,
+	0x70, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d,
+	0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x6c, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70,
+	0x6c, 0x61, 0x72, 0x12, 0x2c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+	0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+	0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
+	0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x80, 0x06, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67,
+	0x72, 0x61, 0x6d, 0x12, 0x1d, 0x0a, 0x09, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49,
+	0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
+	0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+	0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x01, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+	0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+	0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
+	0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
+	0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01,
+	0x52, 0x0c, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x74, 0x12, 0x2a,
+	0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f,
+	0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x40, 0x0a, 0x0e, 0x6e, 0x65,
+	0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e,
+	0x76, 0x31, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0d, 0x6e,
+	0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f,
+	0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18,
+	0x09, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44,
+	0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76,
+	0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0e,
+	0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x40,
+	0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73,
+	0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61,
+	0x6e, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+	0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c,
+	0x74, 0x61, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74,
+	0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x73,
+	0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03,
+	0x28, 0x01, 0x52, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e,
+	0x74, 0x73, 0x12, 0x41, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x69, 0x6e, 0x74,
+	0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d,
+	0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74, 0x52, 0x09, 0x72, 0x65, 0x73, 0x65,
+	0x74, 0x48, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+	0x61, 0x6d, 0x70, 0x22, 0x6c, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74,
+	0x12, 0x22, 0x0a, 0x1e, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, 0x55,
+	0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
+	0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49,
+	0x4e, 0x54, 0x5f, 0x59, 0x45, 0x53, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x53, 0x45,
+	0x54, 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52,
+	0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10,
+	0x03, 0x42, 0x07, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x7a, 0x65,
+	0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b,
+	0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16,
+	0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06,
+	0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0xdc, 0x01, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x53,
+	0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+	0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x06, 0x6c, 0x61, 0x62,
+	0x65, 0x6c, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x02,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+	0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x07, 0x73, 0x61, 0x6d,
+	0x70, 0x6c, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+	0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+	0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+	0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x12, 0x38, 0x0a, 0x0a, 0x68,
+	0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x18, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+	0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x0a, 0x68, 0x69, 0x73, 0x74, 0x6f,
+	0x67, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x31, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
+	0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x36, 0x0a, 0x06, 0x4c, 0x61, 0x62, 0x65,
+	0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e,
+	0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+	0x22, 0xb8, 0x01, 0x0a, 0x0c, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+	0x72, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+	0x20, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+	0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x54, 0x79, 0x70,
+	0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x22, 0x48, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x59, 0x50,
+	0x45, 0x5f, 0x45, 0x51, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+	0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x45, 0x51, 0x10, 0x01,
+	0x12, 0x0b, 0x0a, 0x07, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x10, 0x02, 0x12, 0x0c, 0x0a,
+	0x08, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x52, 0x45, 0x10, 0x03, 0x22, 0xb1, 0x01, 0x0a, 0x09,
+	0x52, 0x65, 0x61, 0x64, 0x48, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x74, 0x65,
+	0x70, 0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x73, 0x74, 0x65, 0x70,
+	0x4d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x75, 0x6e, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x04, 0x66, 0x75, 0x6e, 0x63, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f,
+	0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4d,
+	0x73, 0x12, 0x15, 0x0a, 0x06, 0x65, 0x6e, 0x64, 0x5f, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+	0x03, 0x52, 0x05, 0x65, 0x6e, 0x64, 0x4d, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75,
+	0x70, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x67, 0x72, 0x6f, 0x75,
+	0x70, 0x69, 0x6e, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
+	0x52, 0x02, 0x62, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x73,
+	0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x73, 0x22,
+	0x84, 0x02, 0x0a, 0x05, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x1e, 0x0a, 0x0b, 0x6d, 0x69, 0x6e,
+	0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09,
+	0x6d, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x6d, 0x61, 0x78,
+	0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09,
+	0x6d, 0x61, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x73, 0x12, 0x31, 0x0a, 0x04, 0x74, 0x79, 0x70,
+	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+	0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x45, 0x6e,
+	0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04,
+	0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61,
+	0x22, 0x74, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x20, 0x0a, 0x1c,
+	0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+	0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10,
+	0x0a, 0x0c, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x58, 0x4f, 0x52, 0x10, 0x01,
+	0x12, 0x16, 0x0a, 0x12, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x48, 0x49, 0x53,
+	0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x4e, 0x43, 0x4f,
+	0x44, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f,
+	0x47, 0x52, 0x41, 0x4d, 0x10, 0x03, 0x22, 0x6b, 0x0a, 0x0d, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65,
+	0x64, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+	0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+	0x68, 0x65, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x06, 0x6c,
+	0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18,
+	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+	0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75,
+	0x6e, 0x6b, 0x73, 0x42, 0x49, 0x5a, 0x47, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+	0x6d, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x75, 0x73, 0x6f, 0x6e, 0x65, 0x2f, 0x77, 0x6f, 0x72, 0x6d,
+	0x68, 0x6f, 0x6c, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x76,
+	0x31, 0x3b, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x76, 0x31, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_prometheus_v1_types_proto_rawDescOnce sync.Once
+	file_prometheus_v1_types_proto_rawDescData = file_prometheus_v1_types_proto_rawDesc
+)
+
+func file_prometheus_v1_types_proto_rawDescGZIP() []byte {
+	file_prometheus_v1_types_proto_rawDescOnce.Do(func() {
+		file_prometheus_v1_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_prometheus_v1_types_proto_rawDescData)
+	})
+	return file_prometheus_v1_types_proto_rawDescData
+}
+
+var file_prometheus_v1_types_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_prometheus_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_prometheus_v1_types_proto_goTypes = []interface{}{
+	(MetricMetadata_MetricType)(0), // 0: prometheus.v1.MetricMetadata.MetricType
+	(Histogram_ResetHint)(0),       // 1: prometheus.v1.Histogram.ResetHint
+	(LabelMatcher_Type)(0),         // 2: prometheus.v1.LabelMatcher.Type
+	(Chunk_Encoding)(0),            // 3: prometheus.v1.Chunk.Encoding
+	(*MetricMetadata)(nil),         // 4: prometheus.v1.MetricMetadata
+	(*Sample)(nil),                 // 5: prometheus.v1.Sample
+	(*Exemplar)(nil),               // 6: prometheus.v1.Exemplar
+	(*Histogram)(nil),              // 7: prometheus.v1.Histogram
+	(*BucketSpan)(nil),             // 8: prometheus.v1.BucketSpan
+	(*TimeSeries)(nil),             // 9: prometheus.v1.TimeSeries
+	(*Label)(nil),                  // 10: prometheus.v1.Label
+	(*Labels)(nil),                 // 11: prometheus.v1.Labels
+	(*LabelMatcher)(nil),           // 12: prometheus.v1.LabelMatcher
+	(*ReadHints)(nil),              // 13: prometheus.v1.ReadHints
+	(*Chunk)(nil),                  // 14: prometheus.v1.Chunk
+	(*ChunkedSeries)(nil),          // 15: prometheus.v1.ChunkedSeries
+}
+var file_prometheus_v1_types_proto_depIdxs = []int32{
+	0,  // 0: prometheus.v1.MetricMetadata.type:type_name -> prometheus.v1.MetricMetadata.MetricType
+	10, // 1: prometheus.v1.Exemplar.labels:type_name -> prometheus.v1.Label
+	8,  // 2: prometheus.v1.Histogram.negative_spans:type_name -> prometheus.v1.BucketSpan
+	8,  // 3: prometheus.v1.Histogram.positive_spans:type_name -> prometheus.v1.BucketSpan
+	1,  // 4: prometheus.v1.Histogram.reset_hint:type_name -> prometheus.v1.Histogram.ResetHint
+	10, // 5: prometheus.v1.TimeSeries.labels:type_name -> prometheus.v1.Label
+	5,  // 6: prometheus.v1.TimeSeries.samples:type_name -> prometheus.v1.Sample
+	6,  // 7: prometheus.v1.TimeSeries.exemplars:type_name -> prometheus.v1.Exemplar
+	7,  // 8: prometheus.v1.TimeSeries.histograms:type_name -> prometheus.v1.Histogram
+	10, // 9: prometheus.v1.Labels.labels:type_name -> prometheus.v1.Label
+	2,  // 10: prometheus.v1.LabelMatcher.type:type_name -> prometheus.v1.LabelMatcher.Type
+	3,  // 11: prometheus.v1.Chunk.type:type_name -> prometheus.v1.Chunk.Encoding
+	10, // 12: prometheus.v1.ChunkedSeries.labels:type_name -> prometheus.v1.Label
+	14, // 13: prometheus.v1.ChunkedSeries.chunks:type_name -> prometheus.v1.Chunk
+	14, // [14:14] is the sub-list for method output_type
+	14, // [14:14] is the sub-list for method input_type
+	14, // [14:14] is the sub-list for extension type_name
+	14, // [14:14] is the sub-list for extension extendee
+	0,  // [0:14] is the sub-list for field type_name
+}
+
+func init() { file_prometheus_v1_types_proto_init() }
+func file_prometheus_v1_types_proto_init() {
+	if File_prometheus_v1_types_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_prometheus_v1_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MetricMetadata); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Sample); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Exemplar); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Histogram); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BucketSpan); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TimeSeries); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Label); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Labels); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LabelMatcher); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ReadHints); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Chunk); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_prometheus_v1_types_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ChunkedSeries); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_prometheus_v1_types_proto_msgTypes[3].OneofWrappers = []interface{}{
+		(*Histogram_CountInt)(nil),
+		(*Histogram_CountFloat)(nil),
+		(*Histogram_ZeroCountInt)(nil),
+		(*Histogram_ZeroCountFloat)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_prometheus_v1_types_proto_rawDesc,
+			NumEnums:      4,
+			NumMessages:   12,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_prometheus_v1_types_proto_goTypes,
+		DependencyIndexes: file_prometheus_v1_types_proto_depIdxs,
+		EnumInfos:         file_prometheus_v1_types_proto_enumTypes,
+		MessageInfos:      file_prometheus_v1_types_proto_msgTypes,
+	}.Build()
+	File_prometheus_v1_types_proto = out.File
+	file_prometheus_v1_types_proto_rawDesc = nil
+	file_prometheus_v1_types_proto_goTypes = nil
+	file_prometheus_v1_types_proto_depIdxs = nil
+}

+ 214 - 0
node/pkg/telemetry/prom_remote_write/format.go

@@ -0,0 +1,214 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTICE: THIS FILE HAS BEEN MODIFIED FROM THE ORIGINAL
+// Changes were made to use go protobuf instead of gogo protobuf.
+// Original code is here: https://github.com/prometheus/prometheus/blob/e4ec263bcc11493953c75d1b2e7bc78fd0463e05/util/fmtutil/format.go
+
+package promremotew
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"sort"
+	"time"
+
+	prometheusv1 "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1"
+
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+	"github.com/prometheus/common/model"
+)
+
+const (
+	sumStr    = "_sum"
+	countStr  = "_count"
+	bucketStr = "_bucket"
+)
+
+var MetricMetadataTypeValue = map[string]int32{
+	"UNKNOWN":        0,
+	"COUNTER":        1,
+	"GAUGE":          2,
+	"HISTOGRAM":      3,
+	"GAUGEHISTOGRAM": 4,
+	"SUMMARY":        5,
+	"INFO":           6,
+	"STATESET":       7,
+}
+
+// MetricTextToWriteRequest consumes an io.Reader and returns the data in write request format.
+func MetricTextToWriteRequest(input io.Reader, labels map[string]string) (*prometheusv1.WriteRequest, error) {
+	var parser expfmt.TextParser
+	mf, err := parser.TextToMetricFamilies(input)
+	if err != nil {
+		return nil, err
+	}
+	return MetricFamiliesToWriteRequest(mf, labels)
+}
+
+// MetricFamiliesToWriteRequest converts metric families to a write request.
+func MetricFamiliesToWriteRequest(mf map[string]*dto.MetricFamily, extraLabels map[string]string) (*prometheusv1.WriteRequest, error) {
+	wr := &prometheusv1.WriteRequest{}
+
+	// build metric list
+	sortedMetricNames := make([]string, 0, len(mf))
+	for metric := range mf {
+		sortedMetricNames = append(sortedMetricNames, metric)
+	}
+	// sort metric names in lexicographical order
+	sort.Strings(sortedMetricNames)
+
+	for _, metricName := range sortedMetricNames {
+		// Set metadata writerequest
+		mtype := MetricMetadataTypeValue[mf[metricName].Type.String()]
+		metadata := prometheusv1.MetricMetadata{
+			MetricFamilyName: mf[metricName].GetName(),
+			Type:             prometheusv1.MetricMetadata_MetricType(mtype),
+			Help:             mf[metricName].GetHelp(),
+		}
+		wr.Metadata = append(wr.Metadata, &metadata)
+
+		for _, metric := range mf[metricName].Metric {
+			labels := makeLabelsMap(metric, metricName, extraLabels)
+			if err := makeTimeseries(wr, labels, metric); err != nil {
+				return wr, err
+			}
+		}
+	}
+	return wr, nil
+}
+
+func toTimeseries(wr *prometheusv1.WriteRequest, labels map[string]string, timestamp int64, value float64) {
+	var ts prometheusv1.TimeSeries
+	ts.Labels = makeLabels(labels)
+	sample := prometheusv1.Sample{
+		Timestamp: timestamp,
+		Value:     value,
+	}
+	ts.Samples = append(ts.Samples, &sample)
+	wr.Timeseries = append(wr.Timeseries, &ts)
+}
+
+func makeTimeseries(wr *prometheusv1.WriteRequest, labels map[string]string, m *dto.Metric) error {
+	var err error
+
+	timestamp := m.GetTimestampMs()
+	if timestamp == 0 {
+		timestamp = time.Now().UnixNano() / int64(time.Millisecond)
+	}
+
+	switch {
+	case m.Gauge != nil:
+		toTimeseries(wr, labels, timestamp, m.GetGauge().GetValue())
+	case m.Counter != nil:
+		toTimeseries(wr, labels, timestamp, m.GetCounter().GetValue())
+	case m.Summary != nil:
+		metricName := labels[model.MetricNameLabel]
+		// Preserve metric name ordering: first the quantile time series, then the _sum series, and finally the _count series.
+		// Add Summary quantile timeseries
+		quantileLabels := make(map[string]string, len(labels)+1)
+		for key, value := range labels {
+			quantileLabels[key] = value
+		}
+
+		for _, q := range m.GetSummary().Quantile {
+			quantileLabels[model.QuantileLabel] = fmt.Sprint(q.GetQuantile())
+			toTimeseries(wr, quantileLabels, timestamp, q.GetValue())
+		}
+		// Overwrite label model.MetricNameLabel for count and sum metrics
+		// Add Summary sum time series
+		labels[model.MetricNameLabel] = metricName + sumStr
+		toTimeseries(wr, labels, timestamp, m.GetSummary().GetSampleSum())
+		// Add Summary count time series
+		labels[model.MetricNameLabel] = metricName + countStr
+		toTimeseries(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount()))
+
+	case m.Histogram != nil:
+		metricName := labels[model.MetricNameLabel]
+		// Preserve metric name ordering: first the _bucket time series, then the _sum series, and finally the _count series.
+		// Add Histogram bucket timeseries
+		bucketLabels := make(map[string]string, len(labels)+1)
+		for key, value := range labels {
+			bucketLabels[key] = value
+		}
+		for _, b := range m.GetHistogram().Bucket {
+			bucketLabels[model.MetricNameLabel] = metricName + bucketStr
+			bucketLabels[model.BucketLabel] = fmt.Sprint(b.GetUpperBound())
+			toTimeseries(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount()))
+		}
+		// Overwrite label model.MetricNameLabel for count and sum metrics
+		// Add Histogram sum time series
+		labels[model.MetricNameLabel] = metricName + sumStr
+		toTimeseries(wr, labels, timestamp, m.GetHistogram().GetSampleSum())
+		// Add Histogram count time series
+		labels[model.MetricNameLabel] = metricName + countStr
+		toTimeseries(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount()))
+
+	case m.Untyped != nil:
+		toTimeseries(wr, labels, timestamp, m.GetUntyped().GetValue())
+	default:
+		err = errors.New("unsupported metric type")
+	}
+	return err
+}
+
+func makeLabels(labelsMap map[string]string) []*prometheusv1.Label {
+	// build label name list
+	sortedLabelNames := make([]string, 0, len(labelsMap))
+	for label := range labelsMap {
+		sortedLabelNames = append(sortedLabelNames, label)
+	}
+	// sort label names in lexicographical order
+	sort.Strings(sortedLabelNames)
+
+	labels := make([]*prometheusv1.Label, 0, len(labelsMap))
+	for _, label := range sortedLabelNames {
+		labels = append(labels, &prometheusv1.Label{
+			Name:  label,
+			Value: labelsMap[label],
+		})
+	}
+	return labels
+}
+
+func makeLabelsMap(m *dto.Metric, metricName string, extraLabels map[string]string) map[string]string {
+	// build labels map
+	labels := make(map[string]string, len(m.Label)+len(extraLabels))
+	labels[model.MetricNameLabel] = metricName
+
+	// add extra labels
+	for key, value := range extraLabels {
+		labels[key] = value
+	}
+
+	// add metric labels
+	for _, label := range m.Label {
+		labelname := label.GetName()
+		if labelname == model.JobLabel {
+			labelname = fmt.Sprintf("%s%s", model.ExportedLabelPrefix, labelname)
+		}
+		labels[labelname] = label.GetValue()
+	}
+
+	return labels
+}
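
For reference, a minimal sketch of driving MetricFamiliesToWriteRequest directly with a hand-built metric family, outside the change set itself; the metric name, help text, and extra label below are hypothetical. The extra labels are merged into every series alongside the metric's own labels, exactly as makeLabelsMap does above.

package main

import (
	"fmt"

	promremotew "github.com/certusone/wormhole/node/pkg/telemetry/prom_remote_write"
	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func main() {
	// A hand-built gauge family; in practice this map comes from a Prometheus gatherer.
	mf := map[string]*dto.MetricFamily{
		"example_gauge": {
			Name: proto.String("example_gauge"),
			Help: proto.String("An example gauge."),
			Type: dto.MetricType_GAUGE.Enum(),
			Metric: []*dto.Metric{
				{Gauge: &dto.Gauge{Value: proto.Float64(42)}},
			},
		},
	}

	// extraLabels are attached to every generated time series.
	wr, err := promremotew.MetricFamiliesToWriteRequest(mf, map[string]string{"product": "example"})
	if err != nil {
		panic(err)
	}

	// Expect one metadata entry and one time series whose labels include
	// __name__=example_gauge and product=example.
	fmt.Println(len(wr.Metadata), len(wr.Timeseries))
}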

+ 207 - 0
node/pkg/telemetry/prom_remote_write/prom_test.go

@@ -0,0 +1,207 @@
+package promremotew
+
+import (
+	"bytes"
+	"testing"
+
+	prometheusv1 "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/stretchr/testify/require"
+)
+
+var writeRequestFixture = &prometheusv1.WriteRequest{
+	Metadata: []*prometheusv1.MetricMetadata{
+		{
+			MetricFamilyName: "http_request_duration_seconds",
+			Type:             3,
+			Help:             "A histogram of the request duration.",
+		},
+		{
+			MetricFamilyName: "http_requests_total",
+			Type:             1,
+			Help:             "The total number of HTTP requests.",
+		},
+		{
+			MetricFamilyName: "rpc_duration_seconds",
+			Type:             5,
+			Help:             "A summary of the RPC duration in seconds.",
+		},
+		{
+			MetricFamilyName: "test_metric1",
+			Type:             2,
+			Help:             "This is a test metric.",
+		},
+	},
+	Timeseries: []*prometheusv1.TimeSeries{
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "http_request_duration_seconds_bucket"},
+				{Name: "job", Value: "promtool"},
+				{Name: "le", Value: "0.1"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 33444, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "http_request_duration_seconds_bucket"},
+				{Name: "job", Value: "promtool"},
+				{Name: "le", Value: "0.5"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 129389, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "http_request_duration_seconds_bucket"},
+				{Name: "job", Value: "promtool"},
+				{Name: "le", Value: "1"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 133988, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "http_request_duration_seconds_bucket"},
+				{Name: "job", Value: "promtool"},
+				{Name: "le", Value: "+Inf"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 144320, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "http_request_duration_seconds_sum"},
+				{Name: "job", Value: "promtool"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 53423, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "http_request_duration_seconds_count"},
+				{Name: "job", Value: "promtool"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 144320, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "http_requests_total"},
+				{Name: "code", Value: "200"},
+				{Name: "job", Value: "promtool"},
+				{Name: "method", Value: "post"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 1027, Timestamp: 1395066363000}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "http_requests_total"},
+				{Name: "code", Value: "400"},
+				{Name: "job", Value: "promtool"},
+				{Name: "method", Value: "post"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 3, Timestamp: 1395066363000}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "rpc_duration_seconds"},
+				{Name: "job", Value: "promtool"},
+				{Name: "quantile", Value: "0.01"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 3102, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "rpc_duration_seconds"},
+				{Name: "job", Value: "promtool"},
+				{Name: "quantile", Value: "0.5"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 4773, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "rpc_duration_seconds"},
+				{Name: "job", Value: "promtool"},
+				{Name: "quantile", Value: "0.99"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 76656, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "rpc_duration_seconds_sum"},
+				{Name: "job", Value: "promtool"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 1.7560473e+07, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "rpc_duration_seconds_count"},
+				{Name: "job", Value: "promtool"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 2693, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "test_metric1"},
+				{Name: "b", Value: "c"},
+				{Name: "baz", Value: "qux"},
+				{Name: "d", Value: "e"},
+				{Name: "foo", Value: "bar"},
+				{Name: "job", Value: "promtool"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 1, Timestamp: 1}},
+		},
+		{
+			Labels: []*prometheusv1.Label{
+				{Name: "__name__", Value: "test_metric1"},
+				{Name: "b", Value: "c"},
+				{Name: "baz", Value: "qux"},
+				{Name: "d", Value: "e"},
+				{Name: "foo", Value: "bar"},
+				{Name: "job", Value: "promtool"},
+			},
+			Samples: []*prometheusv1.Sample{{Value: 2, Timestamp: 1}},
+		},
+	},
+}
+
+func TestParseAndPushMetricsTextAndFormat(t *testing.T) {
+	input := bytes.NewReader([]byte(`
+	# HELP http_request_duration_seconds A histogram of the request duration.
+	# TYPE http_request_duration_seconds histogram
+	http_request_duration_seconds_bucket{le="0.1"} 33444 1
+	http_request_duration_seconds_bucket{le="0.5"} 129389 1
+	http_request_duration_seconds_bucket{le="1"} 133988 1
+	http_request_duration_seconds_bucket{le="+Inf"} 144320 1
+	http_request_duration_seconds_sum 53423 1
+	http_request_duration_seconds_count 144320 1
+	# HELP http_requests_total The total number of HTTP requests.
+	# TYPE http_requests_total counter
+	http_requests_total{method="post",code="200"} 1027 1395066363000
+	http_requests_total{method="post",code="400"}    3 1395066363000
+	# HELP rpc_duration_seconds A summary of the RPC duration in seconds.
+	# TYPE rpc_duration_seconds summary
+	rpc_duration_seconds{quantile="0.01"} 3102 1
+	rpc_duration_seconds{quantile="0.5"} 4773 1
+	rpc_duration_seconds{quantile="0.99"} 76656 1
+	rpc_duration_seconds_sum 1.7560473e+07 1
+	rpc_duration_seconds_count 2693 1
+	# HELP test_metric1 This is a test metric.
+	# TYPE test_metric1 gauge
+	test_metric1{b="c",baz="qux",d="e",foo="bar"} 1 1
+	test_metric1{b="c",baz="qux",d="e",foo="bar"} 2 1
+	`))
+	labels := map[string]string{"job": "promtool"}
+
+	actual, err := MetricTextToWriteRequest(input, labels)
+	require.NoError(t, err)
+
+	require.Equal(t, writeRequestFixture, actual)
+}
+
+func TestMarshalUnmarshal(t *testing.T) {
+	timeseries := []*prometheusv1.TimeSeries{}
+	wr := prometheusv1.WriteRequest{Timeseries: timeseries}
+	bytes, err := proto.Marshal(&wr)
+	require.NoError(t, err)
+
+	newWr := prometheusv1.WriteRequest{}
+	err = proto.Unmarshal(bytes, &newWr)
+	require.NoError(t, err)
+}

+ 82 - 0
node/pkg/telemetry/prom_remote_write/scrape.go

@@ -0,0 +1,82 @@
+package promremotew
+
+import (
+	"bytes"
+	"context"
+	"net/http"
+
+	"github.com/golang/snappy"
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+	"go.uber.org/zap"
+	"google.golang.org/protobuf/proto"
+)
+
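+// PromTelemetryInfo holds the configuration for pushing local metrics to a
+// Prometheus remote write endpoint: the target URL and a set of constant
+// labels attached to every exported time series.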
+type PromTelemetryInfo struct {
+	PromRemoteURL string
+	Labels        map[string]string
+}
+
+func scrapeLocalMetricsViaGatherer() (map[string]*dto.MetricFamily, error) {
+	metrics, err := prometheus.DefaultGatherer.Gather()
+	if err != nil {
+		return nil, err
+	}
+	// Index the gathered metric families by name.
+	mapByName := map[string]*dto.MetricFamily{}
+	for _, met := range metrics {
+		name := met.GetName()
+		if _, ok := mapByName[name]; !ok {
+			mapByName[name] = met
+		}
+	}
+	return mapByName, nil
+}
+
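+// ScrapeAndSendLocalMetrics gathers the metrics registered with the default
+// Prometheus registry, converts them into a remote write request, and POSTs
+// the snappy-compressed protobuf payload to info.PromRemoteURL.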
+func ScrapeAndSendLocalMetrics(ctx context.Context, info PromTelemetryInfo, logger *zap.Logger) error {
+	metrics, err := scrapeLocalMetricsViaGatherer()
+	if err != nil {
+		logger.Error("Could not scrape local metrics", zap.Error(err))
+		return err
+	}
+	// Convert the gathered metric families into a remote write request.
+	writeRequest, err := MetricFamiliesToWriteRequest(metrics, info.Labels)
+	if err != nil {
+		logger.Error("Could not create write request", zap.Error(err))
+		return err
+	}
+
+	raw, err := proto.Marshal(writeRequest)
+	if err != nil {
+		logger.Error("Could not marshal write request", zap.Error(err))
+		return err
+	}
+	oSnap := snappy.Encode(nil, raw)
+	bodyReader := bytes.NewReader(oSnap)
+
+	// Create the http request
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, info.PromRemoteURL, bodyReader)
+	if err != nil {
+		logger.Error("Could not create request", zap.Error(err))
+		return err
+	}
+	req.Header.Set("Content-Encoding", "snappy")
+	req.Header.Set("Content-Type", "application/x-protobuf")
+	req.Header.Set("User-Agent", "Guardian")
+	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
+
+	res, err := http.DefaultClient.Do(req)
+	if err != nil {
+		logger.Error("Error sending http request", zap.Error(err))
+		return err
+	}
+	defer res.Body.Close()
+
+	logger.Debug("Grafana result", zap.Int("status code", res.StatusCode))
+	if res.StatusCode != http.StatusOK {
+		logger.Error("Grafana returned non-200 status code", zap.Int("status code", res.StatusCode))
+		return fmt.Errorf("prometheus remote write returned status %d", res.StatusCode)
+	}
+	return nil
+}
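
The full scrape-and-push round trip can also be sketched against a local httptest server standing in for the remote write endpoint, decoding snappy and then the protobuf on the receiving side. This is illustrative only and not part of the change set; the test name and label value are made up.

package promremotew

import (
	"context"
	"io"
	"net/http"
	"net/http/httptest"
	"testing"

	prometheusv1 "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1"
	"github.com/golang/snappy"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"google.golang.org/protobuf/proto"
)

func TestScrapeAndSendLocalMetricsRoundTrip(t *testing.T) {
	var got prometheusv1.WriteRequest
	// Stand-in for the remote write endpoint: snappy-decode the body, then unmarshal the protobuf.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		require.NoError(t, err)
		raw, err := snappy.Decode(nil, body)
		require.NoError(t, err)
		require.NoError(t, proto.Unmarshal(raw, &got))
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()

	info := PromTelemetryInfo{
		PromRemoteURL: srv.URL,
		Labels:        map[string]string{"node_name": "test"},
	}
	require.NoError(t, ScrapeAndSendLocalMetrics(context.Background(), info, zap.NewNop()))
	// got now contains whatever prometheus.DefaultGatherer exposed (go_* and process_* families).
}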

+ 93 - 0
proto/prometheus/v1/remote.proto

@@ -0,0 +1,93 @@
+// Copyright 2016 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTICE: THIS FILE HAS BEEN MODIFIED FROM THE ORIGINAL
+// Changes were made to use go protobuf instead of gogo protobuf.
+// Original code is here: https://github.com/prometheus/prometheus/blob/e4ec263bcc11493953c75d1b2e7bc78fd0463e05/prompb/remote.proto
+
+syntax = "proto3";
+package prometheus.v1;
+
+//option go_package = "prompb";
+option go_package = "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1;prometheusv1";
+
+import "prometheus/v1/types.proto";
+//import "gogoproto/gogo.proto";
+
+message WriteRequest {
+  repeated prometheus.v1.TimeSeries timeseries = 1 ;
+  // Cortex uses this field to determine the source of the write request.
+  // We reserve it to avoid any compatibility issues.
+  reserved  2;
+  repeated prometheus.v1.MetricMetadata metadata = 3 ;
+}
+
+// ReadRequest represents a remote read request.
+message ReadRequest {
+  repeated Query queries = 1;
+
+  enum ResponseType {
+    // Server will return a single ReadResponse message with matched series that includes list of raw samples.
+    // It's recommended to use streamed response types instead.
+    //
+    // Response headers:
+    // Content-Type: "application/x-protobuf"
+    // Content-Encoding: "snappy"
+    RESPONSE_TYPE_SAMPLES_UNSPECIFIED = 0;
+    // Server will stream a delimited ChunkedReadResponse message that
+    // contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+    // Each message is following varint size and fixed size bigendian
+    // uint32 for CRC32 Castagnoli checksum.
+    //
+    // Response headers:
+    // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
+    // Content-Encoding: ""
+    RESPONSE_TYPE_STREAMED_XOR_CHUNKS = 1;
+  }
+
+  // accepted_response_types allows negotiating the content type of the response.
+  //
+  // Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+  // implemented by the server, an error is returned.
+  // For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+  repeated ResponseType accepted_response_types = 2;
+}
+
+// ReadResponse is a response when response_type equals SAMPLES.
+message ReadResponse {
+  // In same order as the request's queries.
+  repeated QueryResult results = 1;
+}
+
+message Query {
+  int64 start_timestamp_ms = 1;
+  int64 end_timestamp_ms = 2;
+  repeated prometheus.v1.LabelMatcher matchers = 3;
+  prometheus.v1.ReadHints hints = 4;
+}
+
+message QueryResult {
+  // Samples within a time series must be ordered by time.
+  repeated prometheus.v1.TimeSeries timeseries = 1;
+}
+
+// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
+// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
+// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
+// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
+message ChunkedReadResponse {
+  repeated prometheus.v1.ChunkedSeries chunked_series = 1;
+
+  // query_index represents an index of the query from ReadRequest.queries these chunks relates to.
+  int64 query_index = 2;
+}

+ 192 - 0
proto/prometheus/v1/types.proto

@@ -0,0 +1,192 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTICE: THIS FILE HAS BEEN MODIFIED FROM THE ORIGINAL
+// Changes were made to use go protobuf instead of gogo protobuf.
+// Original code is here: https://github.com/prometheus/prometheus/blob/e4ec263bcc11493953c75d1b2e7bc78fd0463e05/prompb/types.proto
+
+syntax = "proto3";
+package prometheus.v1;
+
+//option go_package = "prompb";
+option go_package = "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1;prometheusv1";
+
+//import "gogoproto/gogo.proto";
+
+message MetricMetadata {
+  enum MetricType {
+    METRIC_TYPE_UNKNOWN_UNSPECIFIED        = 0;
+    METRIC_TYPE_COUNTER        = 1;
+    METRIC_TYPE_GAUGE          = 2;
+    METRIC_TYPE_HISTOGRAM      = 3;
+    METRIC_TYPE_GAUGEHISTOGRAM = 4;
+    METRIC_TYPE_SUMMARY        = 5;
+    METRIC_TYPE_INFO           = 6;
+    METRIC_TYPE_STATESET       = 7;
+  }
+
+  // Represents the metric type, these match the set from Prometheus.
+  // Refer to model/textparse/interface.go for details.
+  MetricType type = 1;
+  string metric_family_name = 2;
+  string help = 4;
+  string unit = 5;
+}
+
+message Sample {
+  double value    = 1;
+  // timestamp is in ms format, see model/timestamp/timestamp.go for
+  // conversion from time.Time to Prometheus timestamp.
+  int64 timestamp = 2;
+}
+
+message Exemplar {
+  // Optional, can be empty.
+  repeated Label labels = 1 ;
+  double value = 2;
+  // timestamp is in ms format, see model/timestamp/timestamp.go for
+  // conversion from time.Time to Prometheus timestamp.
+  int64 timestamp = 3;
+}
+
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both, the usual
+// integer histogram as well as a float histogram.
+message Histogram {
+  enum ResetHint {
+    RESET_HINT_UNKNOWN_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
+    RESET_HINT_YES     = 1; // This is the 1st histogram after a counter reset.
+    RESET_HINT_NO      = 2; // There was no counter reset between this and the previous Histogram.
+    RESET_HINT_GAUGE   = 3; // This is a gauge histogram where counter resets don't happen.
+  }
+
+  oneof count { // Count of observations in the histogram.
+    uint64 count_int   = 1;
+    double count_float = 2;
+  }
+  double sum = 3; // Sum of observations in the histogram.
+  // The schema defines the bucket schema. Currently, valid numbers
+  // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+  // is a bucket boundary in each case, and then each power of two is
+  // divided into 2^n logarithmic buckets. Or in other words, each
+  // bucket boundary is the previous boundary times 2^(2^-n). In the
+  // future, more bucket schemas may be added using numbers < -4 or >
+  // 8.
+  sint32 schema             = 4;
+  double zero_threshold     = 5; // Breadth of the zero bucket.
+  oneof zero_count { // Count in zero bucket.
+    uint64 zero_count_int     = 6;
+    double zero_count_float   = 7;
+  }
+
+  // Negative Buckets.
+  repeated BucketSpan negative_spans =  8 ;
+  // Use either "negative_deltas" or "negative_counts", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 negative_deltas    =  9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double negative_counts    = 10; // Absolute count of each bucket.
+
+  // Positive Buckets.
+  repeated BucketSpan positive_spans = 11 ;
+  // Use either "positive_deltas" or "positive_counts", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 positive_deltas    = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double positive_counts    = 13; // Absolute count of each bucket.
+
+  ResetHint reset_hint               = 14;
+  // timestamp is in ms format, see model/timestamp/timestamp.go for
+  // conversion from time.Time to Prometheus timestamp.
+  int64 timestamp = 15;
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+message BucketSpan {
+  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+  uint32 length = 2; // Length of consecutive buckets.
+}
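
As a worked illustration of the two encodings described above (the numbers here are purely hypothetical, not part of this change): with schema n, each bucket boundary is the previous boundary times 2^(2^-n), and a span/delta pair expands into sparse absolute bucket counts. The sketch below shows both pieces of arithmetic.

// Illustrative arithmetic only; concrete values are made up.
package main

import (
	"fmt"
	"math"
)

func main() {
	// Bucket boundaries grow by a factor of 2^(2^-schema): schema 0 gives plain
	// powers of two, larger schemas give progressively finer buckets.
	for _, schema := range []int32{0, 3, 8} {
		factor := math.Pow(2, math.Pow(2, -float64(schema)))
		fmt.Printf("schema %d: each boundary = previous * %.6f\n", schema, factor)
	}

	// Spans plus deltas encode sparse buckets compactly. With
	//   positive_spans  = [{offset: 1, length: 2}, {offset: 2, length: 1}]
	//   positive_deltas = [3, -1, 2]
	// the occupied bucket indexes are 1, 2 and 5, and the absolute counts are
	// 3 (0+3), 2 (3-1) and 4 (2+2).
	type span struct{ offset, length int32 }
	spans := []span{{1, 2}, {2, 1}}
	deltas := []int64{3, -1, 2}

	idx, count, d := int32(0), int64(0), 0
	for _, s := range spans {
		idx += s.offset // starting index for the first span, gap to the previous span afterwards
		for i := int32(0); i < s.length; i++ {
			count += deltas[d]
			fmt.Printf("bucket index %d: count %d\n", idx, count)
			idx++
			d++
		}
	}
}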
+
+// TimeSeries represents samples and labels for a single time series.
+message TimeSeries {
+  // For a timeseries to be valid, and for the samples and exemplars
+  // to be ingested by the remote system properly, the labels field is required.
+  repeated Label labels         = 1 ;
+  repeated Sample samples       = 2 ;
+  repeated Exemplar exemplars   = 3 ;
+  repeated Histogram histograms = 4 ;
+}
+
+message Label {
+  string name  = 1;
+  string value = 2;
+}
+
+message Labels {
+  repeated Label labels = 1 ;
+}
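
Since the point of this change is to push metrics to a remote write endpoint, here is a minimal sketch of assembling one valid TimeSeries from the messages above. The metric and label values are made up, and the Go identifiers again assume standard protoc-gen-go output; this is not the code added by this change.

// Illustrative only: assembling a single TimeSeries for remote write.
package main

import (
	"fmt"
	"time"

	prometheusv1 "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1"
)

func main() {
	now := time.Now().UnixMilli() // remote write timestamps are milliseconds

	ts := &prometheusv1.TimeSeries{
		// Labels are required for the series to be ingested; by convention the
		// metric name travels as the __name__ label, and remote write expects
		// label names sorted lexicographically.
		Labels: []*prometheusv1.Label{
			{Name: "__name__", Value: "example_up"},
			{Name: "job", Value: "example"},
		},
		Samples: []*prometheusv1.Sample{
			{Value: 1, Timestamp: now},
		},
	}
	fmt.Println(ts.String())
}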
+
+// LabelMatcher specifies a rule that a given set of labels either matches or does not match.
+message LabelMatcher {
+  enum Type {
+    TYPE_EQ_UNSPECIFIED  = 0;
+    TYPE_NEQ = 1;
+    TYPE_RE  = 2;
+    TYPE_NRE = 3;
+  }
+  Type type    = 1;
+  string name  = 2;
+  string value = 3;
+}
+
+message ReadHints {
+  int64 step_ms = 1;  // Query step size in milliseconds.
+  string func = 2;    // String representation of surrounding function or aggregation.
+  int64 start_ms = 3; // Start time in milliseconds.
+  int64 end_ms = 4;   // End time in milliseconds.
+  repeated string grouping = 5; // List of label names used in aggregation.
+  bool by = 6; // Indicates whether the grouping is "by" (true) or "without" (false).
+  int64 range_ms = 7; // Range vector selector range in milliseconds.
+}
+
+// Chunk represents a TSDB chunk.
+// Time range [min, max] is inclusive.
+message Chunk {
+  int64 min_time_ms = 1;
+  int64 max_time_ms = 2;
+
+  // We require this to match chunkenc.Encoding.
+  enum Encoding {
+    ENCODING_UNKNOWN_UNSPECIFIED         = 0;
+    ENCODING_XOR             = 1;
+    ENCODING_HISTOGRAM       = 2;
+    ENCODING_FLOAT_HISTOGRAM = 3;
+  }
+  Encoding type  = 3;
+  bytes data     = 4;
+}
+
+// ChunkedSeries represents a single, encoded time series.
+message ChunkedSeries {
+  // Labels should be sorted.
+  repeated Label labels = 1 ;
+  // Chunks will be in start time order and may overlap.
+  repeated Chunk chunks = 2 ;
+}
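
Finally, a hedged end-to-end sketch of how such series are typically shipped: wrapped in the WriteRequest from remote.proto, protobuf-encoded, snappy-compressed, and POSTed with the standard remote write headers. The endpoint URL is a placeholder, the WriteRequest field name assumes standard protoc-gen-go output, and this illustrates the protocol rather than the exact code added in this change.

// Illustrative only: serializing and pushing a WriteRequest per the Prometheus
// remote write protocol (snappy-compressed protobuf over HTTP).
package main

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/golang/snappy"
	"google.golang.org/protobuf/proto"

	prometheusv1 "github.com/certusone/wormhole/node/pkg/proto/prometheus/v1"
)

func push(ctx context.Context, url string, series []*prometheusv1.TimeSeries) error {
	raw, err := proto.Marshal(&prometheusv1.WriteRequest{Timeseries: series})
	if err != nil {
		return fmt.Errorf("failed to marshal write request: %w", err)
	}

	// Remote write bodies are snappy block-compressed protobuf.
	body := snappy.Encode(nil, raw)

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return fmt.Errorf("failed to build request: %w", err)
	}
	req.Header.Set("Content-Type", "application/x-protobuf")
	req.Header.Set("Content-Encoding", "snappy")
	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("remote write request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("remote write endpoint returned status %s", resp.Status)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Placeholder endpoint; real deployments point this at their remote write URL.
	_ = push(ctx, "https://example.com/api/prom/push", nil)
}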